/* drivers/net/bnx2.c — snapshot including the change:
 * [BNX2]: Add bnx2_5706_serdes_timer().
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
53
54 #include "bnx2.h"
55 #include "bnx2_fw.h"
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.4.45"
60 #define DRV_MODULE_RELDATE      "September 29, 2006"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board identifiers; each PCI table entry below carries one of these as
 * driver_data so probe can pick the matching name from board_info[]. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
89
/* Human-readable board names, indexed by board_t above.  The order of
 * entries must stay in lock-step with the board_t enum. */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI ID match table.  HP OEM variants are matched first via their
 * subsystem vendor/device IDs; the PCI_ANY_ID entries that follow catch
 * the generic Broadcom boards.  The last field is the board_t index. */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* Supported NVRAM devices.  Each entry is a struct flash_spec (see bnx2.h);
 * the fields here appear to be: strap value, config1, read cmd, write cmd,
 * config3 setup, buffered flag, page bits, page size, byte-address mask,
 * total size, and a name string — NOTE(review): field meanings inferred
 * from the per-entry macros, confirm against struct flash_spec. */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
207
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
/* Return the number of free TX descriptors.
 *
 * The smp_mb() orders the reads of tx_prod/tx_cons against earlier ring
 * updates done on other CPUs.  Producer and consumer are masked to ring
 * positions before subtracting; when the unsigned subtraction wraps
 * (consumer position ahead of producer position), the result is re-masked
 * and one slot is subtracted — NOTE(review): the "- 1" presumably accounts
 * for a reserved descriptor slot per page; confirm against TX_RING_IDX /
 * MAX_TX_DESC_CNT in bnx2.h.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
220
/* Indirect register read: latch the target offset into the PCICFG window
 * address register, then read the value back through the window.  The two
 * accesses must stay in this order. */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
227
/* Indirect register write: set the PCICFG window address, then write the
 * value through the window.  Counterpart of bnx2_reg_rd_ind(). */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
/* Write one 32-bit word into on-chip context memory at cid_addr + offset,
 * using the CTX data address/data register pair (address first). */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
242
/* Read a PHY register over MDIO through the EMAC.
 *
 * @bp:  device state
 * @reg: PHY register number
 * @val: out parameter; receives the 16-bit register value, or 0 on timeout
 *
 * Returns 0 on success, -EBUSY if the MDIO transaction does not complete
 * within ~500us (50 polls of 10us).
 *
 * If hardware auto-polling of the PHY is active, it is temporarily turned
 * off around the manual MDIO access and re-enabled afterwards; the dummy
 * REG_RD after each mode write flushes the posted write before the 40us
 * settling delay.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and fire the read command: PHY address, register, READ op. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for BUSY to clear, then re-read to pick up the data field. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if we disabled it above. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
299
/* Write a PHY register over MDIO through the EMAC.
 *
 * @bp:  device state
 * @reg: PHY register number
 * @val: 16-bit value to write
 *
 * Returns 0 on success, -EBUSY if the transaction is still busy after
 * ~500us.  Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual access and restored afterwards.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* PHY address, register, data and WRITE op in one command word. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
348
/* Mask chip interrupts.  The read-back flushes the posted write so the
 * mask is guaranteed to have reached the device before we return. */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
356
/* Unmask chip interrupts.  The first write acks up to last_status_idx with
 * interrupts still masked, the second unmasks; finally COAL_NOW forces an
 * immediate coalescing pass so any pending events generate an interrupt. */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
369
/* Mask interrupts and wait for any in-flight handler to finish.  intr_sem
 * is bumped first so the ISR sees the disable in progress; paired with the
 * atomic_dec_and_test() in bnx2_netif_start(). */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
377
/* Quiesce the interface: disable interrupts synchronously, stop NAPI
 * polling and the TX queue.  trans_start is refreshed so the watchdog does
 * not fire a spurious TX timeout while the queue is deliberately stopped. */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
388
/* Undo bnx2_netif_stop().  Only the call that drops intr_sem back to zero
 * re-enables the queue, polling and interrupts, so nested stop/start pairs
 * balance correctly. */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
400
/* Release all ring and status-block memory.  Safe to call on a partially
 * allocated state (used as the error path of bnx2_alloc_mem): every pointer
 * is checked or NULL-tolerant and cleared after freeing.  stats_blk shares
 * the status_blk allocation, so it is only NULLed, never freed separately. */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
431
/* Allocate TX/RX rings and the combined status/statistics block.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything allocated so
 * far is released via bnx2_free_mem().  Descriptor rings are DMA-coherent;
 * the software shadow rings (tx_buf_ring/rx_buf_ring) are ordinary kernel
 * memory (rx uses vmalloc since it scales with rx_max_ring).
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	/* vmalloc does not zero; clear the shadow ring explicitly. */
	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* stats block lives cache-aligned right after the status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
490
/* Publish the current link state to the bootcode/management firmware via
 * the BNX2_LINK_STATUS word in shared memory.  Encodes speed+duplex,
 * link-up, and (when autonegotiating) whether AN completed or the link was
 * established by parallel detection. */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice for current status. */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
546
/* Log the link state, update the carrier flag, and forward the state to
 * firmware.  The message is built from several printk() continuations:
 * speed, duplex, then the negotiated flow-control direction(s). */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	bnx2_report_fw_link(bp);
}
581
/* Resolve the effective flow-control setting into bp->flow_ctrl.
 *
 * - If either speed or flow-control autoneg is off, the requested setting
 *   is used directly (full duplex only; pause is meaningless at half).
 * - On 5708 SerDes, the hardware already resolved pause; read it from the
 *   1000X status register.
 * - Otherwise the local and partner advertisements are combined per the
 *   symmetric/asymmetric pause resolution of IEEE 802.3 Table 28B-3.
 *   SerDes (1000Base-X) advertisement bits are first translated to their
 *   copper-format PAUSE_CAP/PAUSE_ASYM equivalents.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		/* Map 1000Base-X pause bits to the copper bit positions so
		 * the resolution logic below works for both media. */
		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
657
658 static int
659 bnx2_5708s_linkup(struct bnx2 *bp)
660 {
661         u32 val;
662
663         bp->link_up = 1;
664         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666                 case BCM5708S_1000X_STAT1_SPEED_10:
667                         bp->line_speed = SPEED_10;
668                         break;
669                 case BCM5708S_1000X_STAT1_SPEED_100:
670                         bp->line_speed = SPEED_100;
671                         break;
672                 case BCM5708S_1000X_STAT1_SPEED_1G:
673                         bp->line_speed = SPEED_1000;
674                         break;
675                 case BCM5708S_1000X_STAT1_SPEED_2G5:
676                         bp->line_speed = SPEED_2500;
677                         break;
678         }
679         if (val & BCM5708S_1000X_STAT1_FD)
680                 bp->duplex = DUPLEX_FULL;
681         else
682                 bp->duplex = DUPLEX_HALF;
683
684         return 0;
685 }
686
687 static int
688 bnx2_5706s_linkup(struct bnx2 *bp)
689 {
690         u32 bmcr, local_adv, remote_adv, common;
691
692         bp->link_up = 1;
693         bp->line_speed = SPEED_1000;
694
695         bnx2_read_phy(bp, MII_BMCR, &bmcr);
696         if (bmcr & BMCR_FULLDPLX) {
697                 bp->duplex = DUPLEX_FULL;
698         }
699         else {
700                 bp->duplex = DUPLEX_HALF;
701         }
702
703         if (!(bmcr & BMCR_ANENABLE)) {
704                 return 0;
705         }
706
707         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708         bnx2_read_phy(bp, MII_LPA, &remote_adv);
709
710         common = local_adv & remote_adv;
711         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712
713                 if (common & ADVERTISE_1000XFULL) {
714                         bp->duplex = DUPLEX_FULL;
715                 }
716                 else {
717                         bp->duplex = DUPLEX_HALF;
718                 }
719         }
720
721         return 0;
722 }
723
/* Copper PHY link-up handler: derive line_speed and duplex.
 *
 * With autoneg enabled, the highest common capability wins: 1000 (from
 * CTRL1000/STAT1000, partner bits shifted into advertisement positions),
 * then 100, then 10; no overlap at all marks the link down.  With autoneg
 * disabled, speed/duplex are taken directly from the forced BMCR bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 LP bits sit 2 positions above the CTRL1000
		 * advertisement bits; shift to align before AND-ing. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: treat link as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
789
/* Program the EMAC to match the resolved link parameters.
 *
 * Sets TX IPG/lengths (special value for 1000HD), the port mode
 * (MII/GMII/2.5G) and duplex bit in EMAC_MODE, enables/disables RX and TX
 * PAUSE according to bp->flow_ctrl, and acks the link-change interrupt.
 * Always returns 0.  NOTE(review): 0x2620/0x26ff TX_LENGTHS values are
 * chip-specific magic; meaning not derivable from this file.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only the 5708 has a dedicated 10M mode. */
				if (CHIP_NUM(bp) == CHIP_NUM_5708) {
					val |= BNX2_EMAC_MODE_PORT_MII_10;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
856
/* Re-evaluate the PHY link state and reprogram the MAC to match.
 * Reports the link only when the state actually changed.
 * NOTE(review): appears to run under bp->phy_lock (it reads/writes
 * PHY registers and bp->link_up) — confirm at the call sites.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback mode the link is simply forced up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remember the previous state so we only report changes. */
	link_up = bp->link_up;

	/* MII_BMSR latches link-down events; read it twice so bmsr
	 * reflects the current link state.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	/* On 5706 SerDes, override the BMSR link bit with the EMAC
	 * status link bit.
	 * NOTE(review): rationale (BMSR unreliability on this part)
	 * inferred from the override itself — confirm against errata.
	 */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex per PHY type. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down.  For autoneg SerDes, make sure autoneg is
		 * (re)enabled in the PHY and any forced 2.5G bit is
		 * cleared so the link can renegotiate.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	/* Reprogram the MAC (speed, duplex, flow control) to match. */
	bnx2_set_mac_link(bp);

	return 0;
}
923
924 static int
925 bnx2_reset_phy(struct bnx2 *bp)
926 {
927         int i;
928         u32 reg;
929
930         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
931
932 #define PHY_RESET_MAX_WAIT 100
933         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
934                 udelay(10);
935
936                 bnx2_read_phy(bp, MII_BMCR, &reg);
937                 if (!(reg & BMCR_RESET)) {
938                         udelay(20);
939                         break;
940                 }
941         }
942         if (i == PHY_RESET_MAX_WAIT) {
943                 return -EBUSY;
944         }
945         return 0;
946 }
947
948 static u32
949 bnx2_phy_get_pause_adv(struct bnx2 *bp)
950 {
951         u32 adv = 0;
952
953         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
955
956                 if (bp->phy_flags & PHY_SERDES_FLAG) {
957                         adv = ADVERTISE_1000XPAUSE;
958                 }
959                 else {
960                         adv = ADVERTISE_PAUSE_CAP;
961                 }
962         }
963         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964                 if (bp->phy_flags & PHY_SERDES_FLAG) {
965                         adv = ADVERTISE_1000XPSE_ASYM;
966                 }
967                 else {
968                         adv = ADVERTISE_PAUSE_ASYM;
969                 }
970         }
971         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972                 if (bp->phy_flags & PHY_SERDES_FLAG) {
973                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
974                 }
975                 else {
976                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
977                 }
978         }
979         return adv;
980 }
981
/* Program a SerDes PHY according to the requested settings, covering
 * both forced-speed and autonegotiated operation.
 * NOTE(review): appears to be called with bp->phy_lock held — the
 * autoneg path below drops and re-acquires it around msleep();
 * confirm at the call sites.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Force 2.5G; if the 2.5G capability bit was not
			 * already set, the link must be bounced for the
			 * change to take effect.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G: clear the capability bit,
			 * bouncing the link if it was set.
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autonegotiated speed. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	/* Only restart autoneg when the advertisement changed or
	 * autoneg is not currently enabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Speed up link-up time when the link partner
			 * does not autonegotiate which is very common
			 * in blade servers. Some blade servers use
			 * IPMI for keyboard input and it's important
			 * to minimize link disruptions. Autoneg. involves
			 * exchanging base pages plus 3 next pages and
			 * normally completes in about 120 msec.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	return 0;
}
1087
/* Advertisement masks used when translating between ethtool's
 * ADVERTISED_* bits and the MII ADVERTISE_* register bits.
 */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* All 10/100 advertisement bits in MII_ADVERTISE (plus CSMA). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* All 1000BASE-T advertisement bits in MII_CTRL1000. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1100
/* Program a copper PHY according to the requested settings.  In
 * autoneg mode the advertisement registers are rewritten and autoneg
 * restarted only when something actually changed; in forced mode the
 * link is bounced so the partner sees the new speed/duplex.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Read the current advertisements, keeping only the
		 * bits we manage so the comparison below is meaningful.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate the ethtool advertising mask into MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		/* CSMA must always be advertised. */
		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			/* Poll (roughly 62 ms of udelay) until the link
			 * actually drops.
			 */
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1195
1196 static int
1197 bnx2_setup_phy(struct bnx2 *bp)
1198 {
1199         if (bp->loopback == MAC_LOOPBACK)
1200                 return 0;
1201
1202         if (bp->phy_flags & PHY_SERDES_FLAG) {
1203                 return (bnx2_setup_serdes_phy(bp));
1204         }
1205         else {
1206                 return (bnx2_setup_copper_phy(bp));
1207         }
1208 }
1209
/* One-time initialization of the 5708 SerDes PHY: select the
 * IEEE-compatible register layout, enable fiber autodetect and
 * parallel detect, and apply board/revision specific TX tuning.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Switch the DIG3 block to the IEEE register layout, then go
	 * back to the DIG block for the remaining accesses.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Enable the 2.5G capability bit when the board supports it. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a TX control value from shared-memory configuration on
	 * backplane boards, if one is present.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1263
/* One-time initialization of the 5706 SerDes PHY.  The 0x18/0x1c
 * accesses program vendor-specific (shadow) PHY registers; the magic
 * values come from Broadcom.  NOTE(review): individual bit meanings
 * are not documented here — do not change without vendor docs.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for normal MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1299
1300 static int
1301 bnx2_init_copper_phy(struct bnx2 *bp)
1302 {
1303         u32 val;
1304
1305         bp->phy_flags |= PHY_CRC_FIX_FLAG;
1306
1307         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1308                 bnx2_write_phy(bp, 0x18, 0x0c00);
1309                 bnx2_write_phy(bp, 0x17, 0x000a);
1310                 bnx2_write_phy(bp, 0x15, 0x310b);
1311                 bnx2_write_phy(bp, 0x17, 0x201f);
1312                 bnx2_write_phy(bp, 0x15, 0x9506);
1313                 bnx2_write_phy(bp, 0x17, 0x401f);
1314                 bnx2_write_phy(bp, 0x15, 0x14e2);
1315                 bnx2_write_phy(bp, 0x18, 0x0400);
1316         }
1317
1318         if (bp->dev->mtu > 1500) {
1319                 /* Set extended packet length bit */
1320                 bnx2_write_phy(bp, 0x18, 0x7);
1321                 bnx2_read_phy(bp, 0x18, &val);
1322                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1323
1324                 bnx2_read_phy(bp, 0x10, &val);
1325                 bnx2_write_phy(bp, 0x10, val | 0x1);
1326         }
1327         else {
1328                 bnx2_write_phy(bp, 0x18, 0x7);
1329                 bnx2_read_phy(bp, 0x18, &val);
1330                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1331
1332                 bnx2_read_phy(bp, 0x10, &val);
1333                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1334         }
1335
1336         /* ethernet@wirespeed */
1337         bnx2_write_phy(bp, 0x18, 0x7007);
1338         bnx2_read_phy(bp, 0x18, &val);
1339         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1340         return 0;
1341 }
1342
1343
1344 static int
1345 bnx2_init_phy(struct bnx2 *bp)
1346 {
1347         u32 val;
1348         int rc = 0;
1349
1350         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1351         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1352
1353         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1354
1355         bnx2_reset_phy(bp);
1356
1357         bnx2_read_phy(bp, MII_PHYSID1, &val);
1358         bp->phy_id = val << 16;
1359         bnx2_read_phy(bp, MII_PHYSID2, &val);
1360         bp->phy_id |= val & 0xffff;
1361
1362         if (bp->phy_flags & PHY_SERDES_FLAG) {
1363                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1364                         rc = bnx2_init_5706s_phy(bp);
1365                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1366                         rc = bnx2_init_5708s_phy(bp);
1367         }
1368         else {
1369                 rc = bnx2_init_copper_phy(bp);
1370         }
1371
1372         bnx2_setup_phy(bp);
1373
1374         return rc;
1375 }
1376
1377 static int
1378 bnx2_set_mac_loopback(struct bnx2 *bp)
1379 {
1380         u32 mac_mode;
1381
1382         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1383         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1384         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1385         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1386         bp->link_up = 1;
1387         return 0;
1388 }
1389
1390 static int bnx2_test_link(struct bnx2 *);
1391
1392 static int
1393 bnx2_set_phy_loopback(struct bnx2 *bp)
1394 {
1395         u32 mac_mode;
1396         int rc, i;
1397
1398         spin_lock_bh(&bp->phy_lock);
1399         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1400                             BMCR_SPEED1000);
1401         spin_unlock_bh(&bp->phy_lock);
1402         if (rc)
1403                 return rc;
1404
1405         for (i = 0; i < 10; i++) {
1406                 if (bnx2_test_link(bp) == 0)
1407                         break;
1408                 msleep(100);
1409         }
1410
1411         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1412         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1413                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1414                       BNX2_EMAC_MODE_25G);
1415
1416         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1417         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1418         bp->link_up = 1;
1419         return 0;
1420 }
1421
/* Post a message to the bootcode through the shared-memory driver
 * mailbox and wait up to FW_ACK_TIME_OUT_MS for the firmware to echo
 * the sequence number back in the firmware mailbox.
 *
 * @msg_data: message code/data; the driver sequence number is ORed in.
 * @silent:   non-zero suppresses the timeout printk.
 *
 * Returns 0 on success or for WAIT0-type messages (which skip the
 * ack/status checks entirely), -EBUSY on ack timeout, -EIO when the
 * firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a fresh sequence number so the ack can
	 * be matched to this request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1464
1465 static void
1466 bnx2_init_context(struct bnx2 *bp)
1467 {
1468         u32 vcid;
1469
1470         vcid = 96;
1471         while (vcid) {
1472                 u32 vcid_addr, pcid_addr, offset;
1473
1474                 vcid--;
1475
1476                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1477                         u32 new_vcid;
1478
1479                         vcid_addr = GET_PCID_ADDR(vcid);
1480                         if (vcid & 0x8) {
1481                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1482                         }
1483                         else {
1484                                 new_vcid = vcid;
1485                         }
1486                         pcid_addr = GET_PCID_ADDR(new_vcid);
1487                 }
1488                 else {
1489                         vcid_addr = GET_CID_ADDR(vcid);
1490                         pcid_addr = vcid_addr;
1491                 }
1492
1493                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1494                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1495
1496                 /* Zero out the context. */
1497                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1498                         CTX_WR(bp, 0x00, offset, 0);
1499                 }
1500
1501                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1502                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1503         }
1504 }
1505
/* Work around bad RX buffer memory blocks.  Allocate every free mbuf
 * from the chip's RX buffer allocator; allocated values with bit 9
 * set mark bad memory blocks.  The good mbufs are freed back to the
 * pool while the bad ones stay allocated (deliberately leaked) so the
 * chip never uses them.  Returns 0 on success, -ENOMEM on allocation
 * failure.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* NOTE(review): assumes the chip never reports more than 512
	 * good mbufs — confirm, or the array below could overflow.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* NOTE(review): the free command appears to encode the
		 * mbuf value in both halves plus a low valid bit —
		 * per firmware interface; confirm against chip docs.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1556
1557 static void
1558 bnx2_set_mac_addr(struct bnx2 *bp)
1559 {
1560         u32 val;
1561         u8 *mac_addr = bp->dev->dev_addr;
1562
1563         val = (mac_addr[0] << 8) | mac_addr[1];
1564
1565         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1566
1567         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1568                 (mac_addr[4] << 8) | mac_addr[5];
1569
1570         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1571 }
1572
1573 static inline int
1574 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1575 {
1576         struct sk_buff *skb;
1577         struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1578         dma_addr_t mapping;
1579         struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1580         unsigned long align;
1581
1582         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1583         if (skb == NULL) {
1584                 return -ENOMEM;
1585         }
1586
1587         if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1588                 skb_reserve(skb, 8 - align);
1589         }
1590
1591         mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1592                 PCI_DMA_FROMDEVICE);
1593
1594         rx_buf->skb = skb;
1595         pci_unmap_addr_set(rx_buf, mapping, mapping);
1596
1597         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1598         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1599
1600         bp->rx_prod_bseq += bp->rx_buf_use_size;
1601
1602         return 0;
1603 }
1604
1605 static void
1606 bnx2_phy_int(struct bnx2 *bp)
1607 {
1608         u32 new_link_state, old_link_state;
1609
1610         new_link_state = bp->status_blk->status_attn_bits &
1611                 STATUS_ATTN_BITS_LINK_STATE;
1612         old_link_state = bp->status_blk->status_attn_bits_ack &
1613                 STATUS_ATTN_BITS_LINK_STATE;
1614         if (new_link_state != old_link_state) {
1615                 if (new_link_state) {
1616                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1617                                 STATUS_ATTN_BITS_LINK_STATE);
1618                 }
1619                 else {
1620                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1621                                 STATUS_ATTN_BITS_LINK_STATE);
1622                 }
1623                 bnx2_set_link(bp);
1624         }
1625 }
1626
/* Service completed TX buffer descriptors up to the hardware consumer
 * index: unmap DMA, free skbs, and wake the queue if it was stopped
 * and enough descriptors have become available.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last slot of each ring page is a next-page pointer, not
	 * a real descriptor; skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			/* Account for the next-page slot if the packet
			 * wraps past the end of a ring page.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if this packet's last BD has not yet
			 * completed (signed 16-bit ring comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		/* Unmap the linear part of the skb... */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* ...then each page fragment, one BD per fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index; more BDs may have
		 * completed while we were working.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to close the race with
		 * bnx2_start_xmit() stopping the queue.
		 */
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1714
/* Recycle an rx skb whose packet was either copied out or dropped:
 * move the same skb (and its existing DMA mapping) from the consumer
 * slot @cons to the producer slot @prod so the hardware can reuse the
 * buffer without a fresh allocation.
 */
1715 static inline void
1716 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1717         u16 cons, u16 prod)
1718 {
1719         struct sw_bd *cons_rx_buf, *prod_rx_buf;
1720         struct rx_bd *cons_bd, *prod_bd;
1721
1722         cons_rx_buf = &bp->rx_buf_ring[cons];
1723         prod_rx_buf = &bp->rx_buf_ring[prod];
1724
                /* Return ownership of the header area (previously synced for
                 * the CPU in bnx2_rx_int()) back to the device. */
1725         pci_dma_sync_single_for_device(bp->pdev,
1726                 pci_unmap_addr(cons_rx_buf, mapping),
1727                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1728
1729         bp->rx_prod_bseq += bp->rx_buf_use_size;
1730
1731         prod_rx_buf->skb = skb;
1732
                /* Same slot: the descriptor already points at this buffer,
                 * nothing else to copy. */
1733         if (cons == prod)
1734                 return;
1735
                /* Carry the DMA mapping over to the producer slot and copy
                 * the buffer address into the producer's hardware BD. */
1736         pci_unmap_addr_set(prod_rx_buf, mapping,
1737                         pci_unmap_addr(cons_rx_buf, mapping));
1738
1739         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1740         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1741         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1742         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1743 }
1744
/* Service completed rx descriptors, up to @budget packets (NAPI).
 * Small packets on jumbo-MTU configs are copied into a fresh skb so the
 * large rx buffer can be recycled; otherwise the filled skb is passed up
 * and a replacement buffer is allocated.  Returns the number of packets
 * processed.
 */
1745 static int
1746 bnx2_rx_int(struct bnx2 *bp, int budget)
1747 {
1748         struct status_block *sblk = bp->status_blk;
1749         u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1750         struct l2_fhdr *rx_hdr;
1751         int rx_pkt = 0;
1752
1753         hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
                /* The last entry of each ring page is skipped by the hardware
                 * index (presumably a next-page link BD) — step over it. */
1754         if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1755                 hw_cons++;
1756         }
1757         sw_cons = bp->rx_cons;
1758         sw_prod = bp->rx_prod;
1759
1760         /* Memory barrier necessary as speculative reads of the rx
1761          * buffer can be ahead of the index in the status block
1762          */
1763         rmb();
1764         while (sw_cons != hw_cons) {
1765                 unsigned int len;
1766                 u32 status;
1767                 struct sw_bd *rx_buf;
1768                 struct sk_buff *skb;
1769                 dma_addr_t dma_addr;
1770
1771                 sw_ring_cons = RX_RING_IDX(sw_cons);
1772                 sw_ring_prod = RX_RING_IDX(sw_prod);
1773
1774                 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1775                 skb = rx_buf->skb;
1776
1777                 rx_buf->skb = NULL;
1778
1779                 dma_addr = pci_unmap_addr(rx_buf, mapping);
1780
                        /* Sync only the header area for the CPU; the full
                         * buffer is unmapped later if we keep the skb. */
1781                 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1782                         bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1783
                        /* The hardware prepends an l2_fhdr to the frame. */
1784                 rx_hdr = (struct l2_fhdr *) skb->data;
                        /* Frame length less the 4-byte CRC. */
1785                 len = rx_hdr->l2_fhdr_pkt_len - 4;
1786
                        /* Drop (recycle) frames with any receive error. */
1787                 if ((status = rx_hdr->l2_fhdr_status) &
1788                         (L2_FHDR_ERRORS_BAD_CRC |
1789                         L2_FHDR_ERRORS_PHY_DECODE |
1790                         L2_FHDR_ERRORS_ALIGNMENT |
1791                         L2_FHDR_ERRORS_TOO_SHORT |
1792                         L2_FHDR_ERRORS_GIANT_FRAME)) {
1793
1794                         goto reuse_rx;
1795                 }
1796
1797                 /* Since we don't have a jumbo ring, copy small packets
1798                  * if mtu > 1500
1799                  */
1800                 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1801                         struct sk_buff *new_skb;
1802
1803                         new_skb = netdev_alloc_skb(bp->dev, len + 2);
1804                         if (new_skb == NULL)
1805                                 goto reuse_rx;
1806
1807                         /* aligned copy */
1808                         memcpy(new_skb->data,
1809                                 skb->data + bp->rx_offset - 2,
1810                                 len + 2);
1811
1812                         skb_reserve(new_skb, 2);
1813                         skb_put(new_skb, len);
1814
                                /* Original large buffer goes back on the ring. */
1815                         bnx2_reuse_rx_skb(bp, skb,
1816                                 sw_ring_cons, sw_ring_prod);
1817
1818                         skb = new_skb;
1819                 }
1820                 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                                /* Replacement buffer allocated — keep this skb
                                 * and unmap the whole buffer. */
1821                         pci_unmap_single(bp->pdev, dma_addr,
1822                                 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1823
1824                         skb_reserve(skb, bp->rx_offset);
1825                         skb_put(skb, len);
1826                 }
1827                 else {
1828 reuse_rx:
                                /* Allocation failed or frame bad: recycle the
                                 * buffer and silently drop the packet. */
1829                         bnx2_reuse_rx_skb(bp, skb,
1830                                 sw_ring_cons, sw_ring_prod);
1831                         goto next_rx;
1832                 }
1833
1834                 skb->protocol = eth_type_trans(skb, bp->dev);
1835
                        /* Oversized non-VLAN (0x8100) frames are dropped here;
                         * VLAN frames are allowed 4 extra bytes. */
1836                 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1837                         (ntohs(skb->protocol) != 0x8100)) {
1838
1839                         dev_kfree_skb(skb);
1840                         goto next_rx;
1841
1842                 }
1843
                        /* Report hardware checksum result only when rx_csum is
                         * enabled and the hardware parsed TCP/UDP. */
1844                 skb->ip_summed = CHECKSUM_NONE;
1845                 if (bp->rx_csum &&
1846                         (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1847                         L2_FHDR_STATUS_UDP_DATAGRAM))) {
1848
1849                         if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1850                                               L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1851                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1852                 }
1853
1854 #ifdef BCM_VLAN
1855                 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1856                         vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1857                                 rx_hdr->l2_fhdr_vlan_tag);
1858                 }
1859                 else
1860 #endif
1861                         netif_receive_skb(skb);
1862
1863                 bp->dev->last_rx = jiffies;
1864                 rx_pkt++;
1865
1866 next_rx:
1867                 sw_cons = NEXT_RX_BD(sw_cons);
1868                 sw_prod = NEXT_RX_BD(sw_prod);
1869
1870                 if ((rx_pkt == budget))
1871                         break;
1872
1873                 /* Refresh hw_cons to see if there is new work */
1874                 if (sw_cons == hw_cons) {
1875                         hw_cons = bp->hw_rx_cons =
1876                                 sblk->status_rx_quick_consumer_index0;
1877                         if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1878                                 hw_cons++;
1879                         rmb();
1880                 }
1881         }
1882         bp->rx_cons = sw_cons;
1883         bp->rx_prod = sw_prod;
1884
                /* Tell the chip about the new producer index and byte count. */
1885         REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1886
1887         REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1888
1889         mmiowb();
1890
1891         return rx_pkt;
1892
1893 }
1894
1895 /* MSI ISR - The only difference between this and the INTx ISR
1896  * is that the MSI interrupt is always serviced.
1897  */
1898 static irqreturn_t
1899 bnx2_msi(int irq, void *dev_instance)
1900 {
1901         struct net_device *dev = dev_instance;
1902         struct bnx2 *bp = netdev_priv(dev);
1903
1904         prefetch(bp->status_blk);
                /* Ack and mask further chip interrupts until the NAPI poll
                 * handler re-enables them. */
1905         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1906                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1907                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1908
1909         /* Return here if interrupt is disabled. */
1910         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1911                 return IRQ_HANDLED;
1912
                /* Defer the real work to bnx2_poll() via NAPI. */
1913         netif_rx_schedule(dev);
1914
1915         return IRQ_HANDLED;
1916 }
1917
/* INTx (legacy, possibly shared) interrupt handler.  Unlike bnx2_msi(),
 * it must detect and reject interrupts that belong to another device on
 * a shared line by returning IRQ_NONE.
 */
1918 static irqreturn_t
1919 bnx2_interrupt(int irq, void *dev_instance)
1920 {
1921         struct net_device *dev = dev_instance;
1922         struct bnx2 *bp = netdev_priv(dev);
1923
1924         /* When using INTx, it is possible for the interrupt to arrive
1925          * at the CPU before the status block posted prior to the
1926          * interrupt. Reading a register will flush the status block.
1927          * When using MSI, the MSI message will always complete after
1928          * the status block write.
1929          */
                /* Not our interrupt: status index unchanged AND INTA is not
                 * asserted by this device. */
1930         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1931             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1932              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1933                 return IRQ_NONE;
1934
                /* Ack and mask until the NAPI poll handler re-enables. */
1935         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1936                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1937                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1938
1939         /* Return here if interrupt is shared and is disabled. */
1940         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1941                 return IRQ_HANDLED;
1942
1943         netif_rx_schedule(dev);
1944
1945         return IRQ_HANDLED;
1946 }
1947
1948 static inline int
1949 bnx2_has_work(struct bnx2 *bp)
1950 {
1951         struct status_block *sblk = bp->status_blk;
1952
1953         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1954             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1955                 return 1;
1956
1957         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1958             bp->link_up)
1959                 return 1;
1960
1961         return 0;
1962 }
1963
/* NAPI poll handler.  Handles pending link attention, drains tx
 * completions, processes up to the rx budget, then re-enables chip
 * interrupts when no work remains.  Returns 0 when done (polling may
 * stop) or 1 to stay on the poll list.
 */
1964 static int
1965 bnx2_poll(struct net_device *dev, int *budget)
1966 {
1967         struct bnx2 *bp = netdev_priv(dev);
1968
                /* Attention bit differs from its ack bit => an unserviced
                 * link state change is pending. */
1969         if ((bp->status_blk->status_attn_bits &
1970                 STATUS_ATTN_BITS_LINK_STATE) !=
1971                 (bp->status_blk->status_attn_bits_ack &
1972                 STATUS_ATTN_BITS_LINK_STATE)) {
1973
1974                 spin_lock(&bp->phy_lock);
1975                 bnx2_phy_int(bp);
1976                 spin_unlock(&bp->phy_lock);
1977
1978                 /* This is needed to take care of transient status
1979                  * during link changes.
1980                  */
1981                 REG_WR(bp, BNX2_HC_COMMAND,
1982                        bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                        /* Read back to flush the posted write. */
1983                 REG_RD(bp, BNX2_HC_COMMAND);
1984         }
1985
1986         if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1987                 bnx2_tx_int(bp);
1988
1989         if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1990                 int orig_budget = *budget;
1991                 int work_done;
1992
                        /* Respect both the global budget and this device's
                         * quota (pre-NAPI-rework API). */
1993                 if (orig_budget > dev->quota)
1994                         orig_budget = dev->quota;
1995
1996                 work_done = bnx2_rx_int(bp, orig_budget);
1997                 *budget -= work_done;
1998                 dev->quota -= work_done;
1999         }
2000
                /* Record the status index we have seen before the final
                 * has-work check, so a new event after this point still
                 * triggers another interrupt. */
2001         bp->last_status_idx = bp->status_blk->status_idx;
2002         rmb();
2003
2004         if (!bnx2_has_work(bp)) {
2005                 netif_rx_complete(dev);
2006                 if (likely(bp->flags & USING_MSI_FLAG)) {
2007                         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2008                                BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2009                                bp->last_status_idx);
2010                         return 0;
2011                 }
                        /* NOTE(review): for INTx the index is written twice,
                         * first with MASK_INT then without — this appears to be
                         * a deliberate hardware workaround; confirm against the
                         * chip errata before changing. */
2012                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2013                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2014                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2015                        bp->last_status_idx);
2016
2017                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2018                        BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2019                        bp->last_status_idx);
2020                 return 0;
2021         }
2022
                /* Still work left: stay on the poll list. */
2023         return 1;
2024 }
2025
2026 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2027  * from set_multicast.
2028  */
/* Program the EMAC receive mode and the RPM sort-user filters to match
 * dev->flags (promiscuous / all-multi) and the multicast list.
 */
2029 static void
2030 bnx2_set_rx_mode(struct net_device *dev)
2031 {
2032         struct bnx2 *bp = netdev_priv(dev);
2033         u32 rx_mode, sort_mode;
2034         int i;
2035
2036         spin_lock_bh(&bp->phy_lock);
2037
2038         rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2039                                   BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2040         sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2041 #ifdef BCM_VLAN
                /* Strip VLAN tags in hardware only when a vlan group is
                 * registered and ASF management firmware is not active. */
2042         if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2043                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2044 #else
2045         if (!(bp->flags & ASF_ENABLE_FLAG))
2046                 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2047 #endif
2048         if (dev->flags & IFF_PROMISC) {
2049                 /* Promiscuous mode. */
2050                 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2051                 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2052                              BNX2_RPM_SORT_USER0_PROM_VLAN;
2053         }
2054         else if (dev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast: set every hash bit. */
2055                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2056                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2057                                0xffffffff);
2058                 }
2059                 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2060         }
2061         else {
2062                 /* Accept one or more multicast(s). */
2063                 struct dev_mc_list *mclist;
2064                 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2065                 u32 regidx;
2066                 u32 bit;
2067                 u32 crc;
2068
2069                 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2070
2071                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2072                      i++, mclist = mclist->next) {
2073
                                /* Hash = low 8 bits of the little-endian CRC of
                                 * the MAC address: top 3 bits pick the register,
                                 * low 5 bits pick the bit within it. */
2074                         crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2075                         bit = crc & 0xff;
2076                         regidx = (bit & 0xe0) >> 5;
2077                         bit &= 0x1f;
2078                         mc_filter[regidx] |= (1 << bit);
2079                 }
2080
2081                 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2082                         REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2083                                mc_filter[i]);
2084                 }
2085
2086                 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2087         }
2088
                /* Only touch the EMAC mode register when it changed. */
2089         if (rx_mode != bp->rx_mode) {
2090                 bp->rx_mode = rx_mode;
2091                 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2092         }
2093
                /* Disable, program, then re-enable the sort-user filter. */
2094         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2095         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2096         REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2097
2098         spin_unlock_bh(&bp->phy_lock);
2099 }
2100
2101 #define FW_BUF_SIZE     0x8000
2102
/* Allocate the scratch buffer and zlib stream used by bnx2_gunzip() to
 * decompress firmware images.  Returns 0 on success or -ENOMEM, cleaning
 * up partial allocations via the goto ladder below.
 */
2103 static int
2104 bnx2_gunzip_init(struct bnx2 *bp)
2105 {
2106         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2107                 goto gunzip_nomem1;
2108
2109         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2110                 goto gunzip_nomem2;
2111
2112         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2113         if (bp->strm->workspace == NULL)
2114                 goto gunzip_nomem3;
2115
2116         return 0;
2117
        /* Unwind in reverse order of allocation. */
2118 gunzip_nomem3:
2119         kfree(bp->strm);
2120         bp->strm = NULL;
2121
2122 gunzip_nomem2:
2123         vfree(bp->gunzip_buf);
2124         bp->gunzip_buf = NULL;
2125
2126 gunzip_nomem1:
2127         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2128                             "uncompression.\n", bp->dev->name);
2129         return -ENOMEM;
2130 }
2131
2132 static void
2133 bnx2_gunzip_end(struct bnx2 *bp)
2134 {
2135         kfree(bp->strm->workspace);
2136
2137         kfree(bp->strm);
2138         bp->strm = NULL;
2139
2140         if (bp->gunzip_buf) {
2141                 vfree(bp->gunzip_buf);
2142                 bp->gunzip_buf = NULL;
2143         }
2144 }
2145
/* Decompress a gzip-wrapped firmware image @zbuf of @len bytes into the
 * preallocated bp->gunzip_buf.  On success returns 0 and sets *outbuf /
 * *outlen; returns -EINVAL for a bad gzip header or a zlib error code on
 * decompression failure.  The gzip header is parsed by hand and the
 * payload inflated raw (see -MAX_WBITS below).
 */
2146 static int
2147 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2148 {
2149         int n, rc;
2150
2151         /* check gzip header */
2152         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2153                 return -EINVAL;
2154
                /* Fixed gzip header is 10 bytes. */
2155         n = 10;
2156
2157 #define FNAME   0x8
                /* FLG.FNAME set: skip the NUL-terminated original file name. */
2158         if (zbuf[3] & FNAME)
2159                 while ((zbuf[n++] != 0) && (n < len));
2160
2161         bp->strm->next_in = zbuf + n;
2162         bp->strm->avail_in = len - n;
2163         bp->strm->next_out = bp->gunzip_buf;
2164         bp->strm->avail_out = FW_BUF_SIZE;
2165
                /* Negative windowBits => raw deflate stream; the gzip header
                 * was already consumed manually above. */
2166         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2167         if (rc != Z_OK)
2168                 return rc;
2169
2170         rc = zlib_inflate(bp->strm, Z_FINISH);
2171
2172         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2173         *outbuf = bp->gunzip_buf;
2174
2175         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2176                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2177                        bp->dev->name, bp->strm->msg);
2178
2179         zlib_inflateEnd(bp->strm);
2180
                /* Z_STREAM_END means the full image was inflated. */
2181         if (rc == Z_STREAM_END)
2182                 return 0;
2183
2184         return rc;
2185 }
2186
/* Load an RV2P processor firmware image: each 8-byte instruction is
 * written as a high/low 32-bit word pair, then committed to the given
 * processor (@rv2p_proc is RV2P_PROC1 or RV2P_PROC2) at the word index
 * i/8.  The processor is left in reset; it is un-stalled elsewhere.
 */
2187 static void
2188 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2189         u32 rv2p_proc)
2190 {
2191         int i;
2192         u32 val;
2193
2194
2195         for (i = 0; i < rv2p_code_len; i += 8) {
                        /* NOTE(review): cpu_to_le32() combined with REG_WR
                         * would double-swap on big-endian hosts if REG_WR
                         * already converts to little-endian — confirm against
                         * the REG_WR definition in bnx2.h. */
2196                 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2197                 rv2p_code++;
2198                 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2199                 rv2p_code++;
2200
2201                 if (rv2p_proc == RV2P_PROC1) {
2202                         val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2203                         REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2204                 }
2205                 else {
2206                         val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2207                         REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2208                 }
2209         }
2210
2211         /* Reset the processor, un-stall is done later. */
2212         if (rv2p_proc == RV2P_PROC1) {
2213                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2214         }
2215         else {
2216                 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2217         }
2218 }
2219
/* Load firmware @fw into the on-chip MIPS CPU described by @cpu_reg:
 * halt the CPU, copy each section (text/data/sbss/bss/rodata) into its
 * scratchpad-relative address, set the program counter, and restart it.
 * All accesses go through the indirect register window (REG_*_IND).
 */
2220 static void
2221 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2222 {
2223         u32 offset;
2224         u32 val;
2225
2226         /* Halt the CPU. */
2227         val = REG_RD_IND(bp, cpu_reg->mode);
2228         val |= cpu_reg->mode_value_halt;
2229         REG_WR_IND(bp, cpu_reg->mode, val);
2230         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2231
2232         /* Load the Text area. */
                /* Section addresses are MIPS virtual addresses; convert to a
                 * scratchpad offset by subtracting mips_view_base. */
2233         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2234         if (fw->text) {
2235                 int j;
2236
                        /* NOTE(review): only the text words are wrapped in
                         * cpu_to_le32(); data/sbss/bss/rodata below are written
                         * raw.  Verify this asymmetry is intentional on
                         * big-endian hosts. */
2237                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2238                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2239                 }
2240         }
2241
2242         /* Load the Data area. */
2243         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2244         if (fw->data) {
2245                 int j;
2246
2247                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2248                         REG_WR_IND(bp, offset, fw->data[j]);
2249                 }
2250         }
2251
2252         /* Load the SBSS area. */
2253         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2254         if (fw->sbss) {
2255                 int j;
2256
2257                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2258                         REG_WR_IND(bp, offset, fw->sbss[j]);
2259                 }
2260         }
2261
2262         /* Load the BSS area. */
2263         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2264         if (fw->bss) {
2265                 int j;
2266
2267                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2268                         REG_WR_IND(bp, offset, fw->bss[j]);
2269                 }
2270         }
2271
2272         /* Load the Read-Only area. */
2273         offset = cpu_reg->spad_base +
2274                 (fw->rodata_addr - cpu_reg->mips_view_base);
2275         if (fw->rodata) {
2276                 int j;
2277
2278                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2279                         REG_WR_IND(bp, offset, fw->rodata[j]);
2280                 }
2281         }
2282
2283         /* Clear the pre-fetch instruction. */
2284         REG_WR_IND(bp, cpu_reg->inst, 0);
2285         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2286
2287         /* Start the CPU. */
2288         val = REG_RD_IND(bp, cpu_reg->mode);
2289         val &= ~cpu_reg->mode_value_halt;
2290         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2291         REG_WR_IND(bp, cpu_reg->mode, val);
2292 }
2293
/* Decompress and load firmware into all on-chip processors: the two
 * RV2P engines plus the RXP, TXP, TPAT and COM MIPS CPUs.  Each image
 * is inflated into the single shared gunzip buffer, so it must be fully
 * consumed by load_rv2p_fw()/load_cpu_fw() before the next decompress.
 * Returns 0 on success or a gunzip error code; on any failure control
 * jumps to init_cpu_err, which releases the gunzip resources.
 */
2294 static int
2295 bnx2_init_cpus(struct bnx2 *bp)
2296 {
2297         struct cpu_reg cpu_reg;
2298         struct fw_info fw;
2299         int rc = 0;
2300         void *text;
2301         u32 text_len;
2302
2303         if ((rc = bnx2_gunzip_init(bp)) != 0)
2304                 return rc;
2305
2306         /* Initialize the RV2P processor. */
2307         rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2308                          &text_len);
2309         if (rc)
2310                 goto init_cpu_err;
2311
2312         load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2313
2314         rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2315                          &text_len);
2316         if (rc)
2317                 goto init_cpu_err;
2318
2319         load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2320
2321         /* Initialize the RX Processor. */
                /* cpu_reg/fw are rebuilt by hand for each CPU; only the text
                 * section is compressed, the other sections are plain arrays. */
2322         cpu_reg.mode = BNX2_RXP_CPU_MODE;
2323         cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2324         cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2325         cpu_reg.state = BNX2_RXP_CPU_STATE;
2326         cpu_reg.state_value_clear = 0xffffff;
2327         cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2328         cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2329         cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2330         cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2331         cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2332         cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2333         cpu_reg.mips_view_base = 0x8000000;
2334
2335         fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2336         fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2337         fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2338         fw.start_addr = bnx2_RXP_b06FwStartAddr;
2339
2340         fw.text_addr = bnx2_RXP_b06FwTextAddr;
2341         fw.text_len = bnx2_RXP_b06FwTextLen;
2342         fw.text_index = 0;
2343
2344         rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2345                          &text, &text_len);
2346         if (rc)
2347                 goto init_cpu_err;
2348
2349         fw.text = text;
2350
2351         fw.data_addr = bnx2_RXP_b06FwDataAddr;
2352         fw.data_len = bnx2_RXP_b06FwDataLen;
2353         fw.data_index = 0;
2354         fw.data = bnx2_RXP_b06FwData;
2355
2356         fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2357         fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2358         fw.sbss_index = 0;
2359         fw.sbss = bnx2_RXP_b06FwSbss;
2360
2361         fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2362         fw.bss_len = bnx2_RXP_b06FwBssLen;
2363         fw.bss_index = 0;
2364         fw.bss = bnx2_RXP_b06FwBss;
2365
2366         fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2367         fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2368         fw.rodata_index = 0;
2369         fw.rodata = bnx2_RXP_b06FwRodata;
2370
2371         load_cpu_fw(bp, &cpu_reg, &fw);
2372
2373         /* Initialize the TX Processor. */
2374         cpu_reg.mode = BNX2_TXP_CPU_MODE;
2375         cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2376         cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2377         cpu_reg.state = BNX2_TXP_CPU_STATE;
2378         cpu_reg.state_value_clear = 0xffffff;
2379         cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2380         cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2381         cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2382         cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2383         cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2384         cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2385         cpu_reg.mips_view_base = 0x8000000;
2386
2387         fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2388         fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2389         fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2390         fw.start_addr = bnx2_TXP_b06FwStartAddr;
2391
2392         fw.text_addr = bnx2_TXP_b06FwTextAddr;
2393         fw.text_len = bnx2_TXP_b06FwTextLen;
2394         fw.text_index = 0;
2395
2396         rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2397                          &text, &text_len);
2398         if (rc)
2399                 goto init_cpu_err;
2400
2401         fw.text = text;
2402
2403         fw.data_addr = bnx2_TXP_b06FwDataAddr;
2404         fw.data_len = bnx2_TXP_b06FwDataLen;
2405         fw.data_index = 0;
2406         fw.data = bnx2_TXP_b06FwData;
2407
2408         fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2409         fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2410         fw.sbss_index = 0;
2411         fw.sbss = bnx2_TXP_b06FwSbss;
2412
2413         fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2414         fw.bss_len = bnx2_TXP_b06FwBssLen;
2415         fw.bss_index = 0;
2416         fw.bss = bnx2_TXP_b06FwBss;
2417
2418         fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2419         fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2420         fw.rodata_index = 0;
2421         fw.rodata = bnx2_TXP_b06FwRodata;
2422
2423         load_cpu_fw(bp, &cpu_reg, &fw);
2424
2425         /* Initialize the TX Patch-up Processor. */
2426         cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2427         cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2428         cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2429         cpu_reg.state = BNX2_TPAT_CPU_STATE;
2430         cpu_reg.state_value_clear = 0xffffff;
2431         cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2432         cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2433         cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2434         cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2435         cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2436         cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2437         cpu_reg.mips_view_base = 0x8000000;
2438
2439         fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2440         fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2441         fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2442         fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2443
2444         fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2445         fw.text_len = bnx2_TPAT_b06FwTextLen;
2446         fw.text_index = 0;
2447
2448         rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2449                          &text, &text_len);
2450         if (rc)
2451                 goto init_cpu_err;
2452
2453         fw.text = text;
2454
2455         fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2456         fw.data_len = bnx2_TPAT_b06FwDataLen;
2457         fw.data_index = 0;
2458         fw.data = bnx2_TPAT_b06FwData;
2459
2460         fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2461         fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2462         fw.sbss_index = 0;
2463         fw.sbss = bnx2_TPAT_b06FwSbss;
2464
2465         fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2466         fw.bss_len = bnx2_TPAT_b06FwBssLen;
2467         fw.bss_index = 0;
2468         fw.bss = bnx2_TPAT_b06FwBss;
2469
2470         fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2471         fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2472         fw.rodata_index = 0;
2473         fw.rodata = bnx2_TPAT_b06FwRodata;
2474
2475         load_cpu_fw(bp, &cpu_reg, &fw);
2476
2477         /* Initialize the Completion Processor. */
2478         cpu_reg.mode = BNX2_COM_CPU_MODE;
2479         cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2480         cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2481         cpu_reg.state = BNX2_COM_CPU_STATE;
2482         cpu_reg.state_value_clear = 0xffffff;
2483         cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2484         cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2485         cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2486         cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2487         cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2488         cpu_reg.spad_base = BNX2_COM_SCRATCH;
2489         cpu_reg.mips_view_base = 0x8000000;
2490
2491         fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2492         fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2493         fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2494         fw.start_addr = bnx2_COM_b06FwStartAddr;
2495
2496         fw.text_addr = bnx2_COM_b06FwTextAddr;
2497         fw.text_len = bnx2_COM_b06FwTextLen;
2498         fw.text_index = 0;
2499
2500         rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2501                          &text, &text_len);
2502         if (rc)
2503                 goto init_cpu_err;
2504
2505         fw.text = text;
2506
2507         fw.data_addr = bnx2_COM_b06FwDataAddr;
2508         fw.data_len = bnx2_COM_b06FwDataLen;
2509         fw.data_index = 0;
2510         fw.data = bnx2_COM_b06FwData;
2511
2512         fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2513         fw.sbss_len = bnx2_COM_b06FwSbssLen;
2514         fw.sbss_index = 0;
2515         fw.sbss = bnx2_COM_b06FwSbss;
2516
2517         fw.bss_addr = bnx2_COM_b06FwBssAddr;
2518         fw.bss_len = bnx2_COM_b06FwBssLen;
2519         fw.bss_index = 0;
2520         fw.bss = bnx2_COM_b06FwBss;
2521
2522         fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2523         fw.rodata_len = bnx2_COM_b06FwRodataLen;
2524         fw.rodata_index = 0;
2525         fw.rodata = bnx2_COM_b06FwRodata;
2526
2527         load_cpu_fw(bp, &cpu_reg, &fw);
2528
        /* Shared cleanup for both the success and failure paths. */
2529 init_cpu_err:
2530         bnx2_gunzip_end(bp);
2531         return rc;
2532 }
2533
/* Transition the chip between PCI power states.
 *
 * D0: restore the power state, clear PME status, wait out the
 * D3hot-exit delay if needed, and disable magic/ACPI packet
 * detection in the MAC and RPM blocks.
 *
 * D3hot: if Wake-on-LAN is enabled, force a 10/100 link, program the
 * MAC/RPM to accept broadcast/multicast/magic packets, notify the
 * firmware of the suspend, then write PMCSR to enter D3hot (with PME
 * enabled when WOL is armed).
 *
 * Returns 0 on success or -EINVAL for an unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Set state to D0 and clear the PME status bit. */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Ack any received magic/ACPI packet indications and
                 * stop magic packet detection now that we are awake. */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Temporarily advertise only 10/100 so the link
                         * can be kept up in low power, then restore the
                         * user's autoneg/advertising settings. */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        bp->autoneg = AUTONEG_SPEED;
                        bp->advertising = ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_Autoneg;

                        bnx2_setup_copper_phy(bp);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_PORT_MII |
                               BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Program sort rule 1 with BC/MC enabled: reset
                         * with 0, write the value, then set the enable
                         * bit. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the firmware we are suspending and whether WOL
                 * is armed. */
                if (!(bp->flags & NO_WOL_FLAG))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        /* 5706 A0/A1 only enter D3hot (state value 3)
                         * when WOL is enabled; otherwise stay in D0. */
                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
2660
2661 static int
2662 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2663 {
2664         u32 val;
2665         int j;
2666
2667         /* Request access to the flash interface. */
2668         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2669         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2670                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2671                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2672                         break;
2673
2674                 udelay(5);
2675         }
2676
2677         if (j >= NVRAM_TIMEOUT_COUNT)
2678                 return -EBUSY;
2679
2680         return 0;
2681 }
2682
2683 static int
2684 bnx2_release_nvram_lock(struct bnx2 *bp)
2685 {
2686         int j;
2687         u32 val;
2688
2689         /* Relinquish nvram interface. */
2690         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2691
2692         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2693                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2694                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2695                         break;
2696
2697                 udelay(5);
2698         }
2699
2700         if (j >= NVRAM_TIMEOUT_COUNT)
2701                 return -EBUSY;
2702
2703         return 0;
2704 }
2705
2706
2707 static int
2708 bnx2_enable_nvram_write(struct bnx2 *bp)
2709 {
2710         u32 val;
2711
2712         val = REG_RD(bp, BNX2_MISC_CFG);
2713         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2714
2715         if (!bp->flash_info->buffered) {
2716                 int j;
2717
2718                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2719                 REG_WR(bp, BNX2_NVM_COMMAND,
2720                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2721
2722                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2723                         udelay(5);
2724
2725                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2726                         if (val & BNX2_NVM_COMMAND_DONE)
2727                                 break;
2728                 }
2729
2730                 if (j >= NVRAM_TIMEOUT_COUNT)
2731                         return -EBUSY;
2732         }
2733         return 0;
2734 }
2735
2736 static void
2737 bnx2_disable_nvram_write(struct bnx2 *bp)
2738 {
2739         u32 val;
2740
2741         val = REG_RD(bp, BNX2_MISC_CFG);
2742         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2743 }
2744
2745
2746 static void
2747 bnx2_enable_nvram_access(struct bnx2 *bp)
2748 {
2749         u32 val;
2750
2751         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2752         /* Enable both bits, even on read. */
2753         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2754                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2755 }
2756
2757 static void
2758 bnx2_disable_nvram_access(struct bnx2 *bp)
2759 {
2760         u32 val;
2761
2762         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2763         /* Disable both bits, even after read. */
2764         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2765                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2766                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2767 }
2768
2769 static int
2770 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2771 {
2772         u32 cmd;
2773         int j;
2774
2775         if (bp->flash_info->buffered)
2776                 /* Buffered flash, no erase needed */
2777                 return 0;
2778
2779         /* Build an erase command */
2780         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2781               BNX2_NVM_COMMAND_DOIT;
2782
2783         /* Need to clear DONE bit separately. */
2784         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2785
2786         /* Address of the NVRAM to read from. */
2787         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2788
2789         /* Issue an erase command. */
2790         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2791
2792         /* Wait for completion. */
2793         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2794                 u32 val;
2795
2796                 udelay(5);
2797
2798                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2799                 if (val & BNX2_NVM_COMMAND_DONE)
2800                         break;
2801         }
2802
2803         if (j >= NVRAM_TIMEOUT_COUNT)
2804                 return -EBUSY;
2805
2806         return 0;
2807 }
2808
2809 static int
2810 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2811 {
2812         u32 cmd;
2813         int j;
2814
2815         /* Build the command word. */
2816         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2817
2818         /* Calculate an offset of a buffered flash. */
2819         if (bp->flash_info->buffered) {
2820                 offset = ((offset / bp->flash_info->page_size) <<
2821                            bp->flash_info->page_bits) +
2822                           (offset % bp->flash_info->page_size);
2823         }
2824
2825         /* Need to clear DONE bit separately. */
2826         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2827
2828         /* Address of the NVRAM to read from. */
2829         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2830
2831         /* Issue a read command. */
2832         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2833
2834         /* Wait for completion. */
2835         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2836                 u32 val;
2837
2838                 udelay(5);
2839
2840                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2841                 if (val & BNX2_NVM_COMMAND_DONE) {
2842                         val = REG_RD(bp, BNX2_NVM_READ);
2843
2844                         val = be32_to_cpu(val);
2845                         memcpy(ret_val, &val, 4);
2846                         break;
2847                 }
2848         }
2849         if (j >= NVRAM_TIMEOUT_COUNT)
2850                 return -EBUSY;
2851
2852         return 0;
2853 }
2854
2855
2856 static int
2857 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2858 {
2859         u32 cmd, val32;
2860         int j;
2861
2862         /* Build the command word. */
2863         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2864
2865         /* Calculate an offset of a buffered flash. */
2866         if (bp->flash_info->buffered) {
2867                 offset = ((offset / bp->flash_info->page_size) <<
2868                           bp->flash_info->page_bits) +
2869                          (offset % bp->flash_info->page_size);
2870         }
2871
2872         /* Need to clear DONE bit separately. */
2873         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2874
2875         memcpy(&val32, val, 4);
2876         val32 = cpu_to_be32(val32);
2877
2878         /* Write the data. */
2879         REG_WR(bp, BNX2_NVM_WRITE, val32);
2880
2881         /* Address of the NVRAM to write to. */
2882         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2883
2884         /* Issue the write command. */
2885         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2886
2887         /* Wait for completion. */
2888         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2889                 udelay(5);
2890
2891                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2892                         break;
2893         }
2894         if (j >= NVRAM_TIMEOUT_COUNT)
2895                 return -EBUSY;
2896
2897         return 0;
2898 }
2899
/* Identify the attached flash/EEPROM part from the NVM_CFG1 strapping
 * and record it in bp->flash_info, reconfiguring the NVRAM interface
 * registers if the strapping has not been applied yet.  Also records
 * the flash size in bp->flash_size (from shared HW config when
 * present, otherwise from the table entry).
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc;
        struct flash_spec *flash;

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

        rc = 0;
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        /* Match against the backup-strap bits of the
                         * already-programmed config1 value. */
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strap field to compare against. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Neither loop found a match: unsupported part. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

        /* Prefer the NVRAM size advertised in shared HW config; fall
         * back to the table entry's total size. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
2977
2978 static int
2979 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2980                 int buf_size)
2981 {
2982         int rc = 0;
2983         u32 cmd_flags, offset32, len32, extra;
2984
2985         if (buf_size == 0)
2986                 return 0;
2987
2988         /* Request access to the flash interface. */
2989         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2990                 return rc;
2991
2992         /* Enable access to flash interface */
2993         bnx2_enable_nvram_access(bp);
2994
2995         len32 = buf_size;
2996         offset32 = offset;
2997         extra = 0;
2998
2999         cmd_flags = 0;
3000
3001         if (offset32 & 3) {
3002                 u8 buf[4];
3003                 u32 pre_len;
3004
3005                 offset32 &= ~3;
3006                 pre_len = 4 - (offset & 3);
3007
3008                 if (pre_len >= len32) {
3009                         pre_len = len32;
3010                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3011                                     BNX2_NVM_COMMAND_LAST;
3012                 }
3013                 else {
3014                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3015                 }
3016
3017                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3018
3019                 if (rc)
3020                         return rc;
3021
3022                 memcpy(ret_buf, buf + (offset & 3), pre_len);
3023
3024                 offset32 += 4;
3025                 ret_buf += pre_len;
3026                 len32 -= pre_len;
3027         }
3028         if (len32 & 3) {
3029                 extra = 4 - (len32 & 3);
3030                 len32 = (len32 + 4) & ~3;
3031         }
3032
3033         if (len32 == 4) {
3034                 u8 buf[4];
3035
3036                 if (cmd_flags)
3037                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3038                 else
3039                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
3040                                     BNX2_NVM_COMMAND_LAST;
3041
3042                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3043
3044                 memcpy(ret_buf, buf, 4 - extra);
3045         }
3046         else if (len32 > 0) {
3047                 u8 buf[4];
3048
3049                 /* Read the first word. */
3050                 if (cmd_flags)
3051                         cmd_flags = 0;
3052                 else
3053                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3054
3055                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3056
3057                 /* Advance to the next dword. */
3058                 offset32 += 4;
3059                 ret_buf += 4;
3060                 len32 -= 4;
3061
3062                 while (len32 > 4 && rc == 0) {
3063                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3064
3065                         /* Advance to the next dword. */
3066                         offset32 += 4;
3067                         ret_buf += 4;
3068                         len32 -= 4;
3069                 }
3070
3071                 if (rc)
3072                         return rc;
3073
3074                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3075                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3076
3077                 memcpy(ret_buf, buf, 4 - extra);
3078         }
3079
3080         /* Disable access to flash interface */
3081         bnx2_disable_nvram_access(bp);
3082
3083         bnx2_release_nvram_lock(bp);
3084
3085         return rc;
3086 }
3087
3088 static int
3089 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3090                 int buf_size)
3091 {
3092         u32 written, offset32, len32;
3093         u8 *buf, start[4], end[4], *flash_buffer = NULL;
3094         int rc = 0;
3095         int align_start, align_end;
3096
3097         buf = data_buf;
3098         offset32 = offset;
3099         len32 = buf_size;
3100         align_start = align_end = 0;
3101
3102         if ((align_start = (offset32 & 3))) {
3103                 offset32 &= ~3;
3104                 len32 += align_start;
3105                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3106                         return rc;
3107         }
3108
3109         if (len32 & 3) {
3110                 if ((len32 > 4) || !align_start) {
3111                         align_end = 4 - (len32 & 3);
3112                         len32 += align_end;
3113                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3114                                 end, 4))) {
3115                                 return rc;
3116                         }
3117                 }
3118         }
3119
3120         if (align_start || align_end) {
3121                 buf = kmalloc(len32, GFP_KERNEL);
3122                 if (buf == 0)
3123                         return -ENOMEM;
3124                 if (align_start) {
3125                         memcpy(buf, start, 4);
3126                 }
3127                 if (align_end) {
3128                         memcpy(buf + len32 - 4, end, 4);
3129                 }
3130                 memcpy(buf + align_start, data_buf, buf_size);
3131         }
3132
3133         if (bp->flash_info->buffered == 0) {
3134                 flash_buffer = kmalloc(264, GFP_KERNEL);
3135                 if (flash_buffer == NULL) {
3136                         rc = -ENOMEM;
3137                         goto nvram_write_end;
3138                 }
3139         }
3140
3141         written = 0;
3142         while ((written < len32) && (rc == 0)) {
3143                 u32 page_start, page_end, data_start, data_end;
3144                 u32 addr, cmd_flags;
3145                 int i;
3146
3147                 /* Find the page_start addr */
3148                 page_start = offset32 + written;
3149                 page_start -= (page_start % bp->flash_info->page_size);
3150                 /* Find the page_end addr */
3151                 page_end = page_start + bp->flash_info->page_size;
3152                 /* Find the data_start addr */
3153                 data_start = (written == 0) ? offset32 : page_start;
3154                 /* Find the data_end addr */
3155                 data_end = (page_end > offset32 + len32) ?
3156                         (offset32 + len32) : page_end;
3157
3158                 /* Request access to the flash interface. */
3159                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3160                         goto nvram_write_end;
3161
3162                 /* Enable access to flash interface */
3163                 bnx2_enable_nvram_access(bp);
3164
3165                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3166                 if (bp->flash_info->buffered == 0) {
3167                         int j;
3168
3169                         /* Read the whole page into the buffer
3170                          * (non-buffer flash only) */
3171                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3172                                 if (j == (bp->flash_info->page_size - 4)) {
3173                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3174                                 }
3175                                 rc = bnx2_nvram_read_dword(bp,
3176                                         page_start + j,
3177                                         &flash_buffer[j],
3178                                         cmd_flags);
3179
3180                                 if (rc)
3181                                         goto nvram_write_end;
3182
3183                                 cmd_flags = 0;
3184                         }
3185                 }
3186
3187                 /* Enable writes to flash interface (unlock write-protect) */
3188                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3189                         goto nvram_write_end;
3190
3191                 /* Erase the page */
3192                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3193                         goto nvram_write_end;
3194
3195                 /* Re-enable the write again for the actual write */
3196                 bnx2_enable_nvram_write(bp);
3197
3198                 /* Loop to write back the buffer data from page_start to
3199                  * data_start */
3200                 i = 0;
3201                 if (bp->flash_info->buffered == 0) {
3202                         for (addr = page_start; addr < data_start;
3203                                 addr += 4, i += 4) {
3204
3205                                 rc = bnx2_nvram_write_dword(bp, addr,
3206                                         &flash_buffer[i], cmd_flags);
3207
3208                                 if (rc != 0)
3209                                         goto nvram_write_end;
3210
3211                                 cmd_flags = 0;
3212                         }
3213                 }
3214
3215                 /* Loop to write the new data from data_start to data_end */
3216                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3217                         if ((addr == page_end - 4) ||
3218                                 ((bp->flash_info->buffered) &&
3219                                  (addr == data_end - 4))) {
3220
3221                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3222                         }
3223                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3224                                 cmd_flags);
3225
3226                         if (rc != 0)
3227                                 goto nvram_write_end;
3228
3229                         cmd_flags = 0;
3230                         buf += 4;
3231                 }
3232
3233                 /* Loop to write back the buffer data from data_end
3234                  * to page_end */
3235                 if (bp->flash_info->buffered == 0) {
3236                         for (addr = data_end; addr < page_end;
3237                                 addr += 4, i += 4) {
3238
3239                                 if (addr == page_end-4) {
3240                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3241                                 }
3242                                 rc = bnx2_nvram_write_dword(bp, addr,
3243                                         &flash_buffer[i], cmd_flags);
3244
3245                                 if (rc != 0)
3246                                         goto nvram_write_end;
3247
3248                                 cmd_flags = 0;
3249                         }
3250                 }
3251
3252                 /* Disable writes to flash interface (lock write-protect) */
3253                 bnx2_disable_nvram_write(bp);
3254
3255                 /* Disable access to flash interface */
3256                 bnx2_disable_nvram_access(bp);
3257                 bnx2_release_nvram_lock(bp);
3258
3259                 /* Increment written */
3260                 written += data_end - data_start;
3261         }
3262
3263 nvram_write_end:
3264         if (bp->flash_info->buffered == 0)
3265                 kfree(flash_buffer);
3266
3267         if (align_start || align_end)
3268                 kfree(buf);
3269         return rc;
3270 }
3271
/* Perform a coordinated soft reset of the chip: quiesce DMA, hand-
 * shake with the bootcode firmware before and after the reset, issue
 * the core reset via PCICFG_MISC_CONFIG, and verify endian setup.
 * Applies 5706 A0-specific workarounds afterwards.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * if byte swapping is wrong, or an error from the firmware sync.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
                   BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
              BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
              BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

        /* Chip reset. */
        REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

        /* 5706 A0/A1 parts get a 15 ms delay before polling --
         * presumably an early-silicon workaround; confirm against the
         * errata list. */
        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1))
                msleep(15);

        /* Reset takes approximate 30 usec */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
                        break;
                }
                udelay(10);
        }

        /* Both REQ and BSY must have cleared for a completed reset. */
        if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                printk(KERN_ERR PFX "Chip reset did not complete\n");
                return -EBUSY;
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                printk(KERN_ERR PFX "Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
        if (rc)
                return rc;

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        return rc;
}
3350
/* One-time chip bring-up after reset: program the DMA engine, enable the
 * host-coalescing/RX-V2P/context blocks, load the on-chip CPU firmware,
 * initialize NVRAM access and the MAC address, size the kernel-bypass
 * mailbox window, program MTU and host-coalescing parameters, and finally
 * tell the bootcode that reset handling is complete.
 *
 * Returns 0 on success, or a negative errno from CPU init or the final
 * firmware synchronization.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
        u32 val;
        int rc;

        /* Make sure the interrupt is not active. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Byte/word swapping so descriptor data is read correctly on both
         * endiannesses; control-path byte swap only needed on big-endian. */
        val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
              BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
              BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
              BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
              DMA_READ_CHANS << 12 |
              DMA_WRITE_CHANS << 16;

        /* NOTE(review): magic DMA config bits (20-21 and 11) — exact
         * meaning not visible in this file; presumably hardware-manual
         * recommended values. */
        val |= (0x2 << 20) | (1 << 11);

        /* NOTE(review): bit 23 set only for 133 MHz PCI-X — verify
         * against the chip errata/manual. */
        if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
                val |= (1 << 23);

        if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
            (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
                val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

        REG_WR(bp, BNX2_DMA_CONFIG, val);

        /* 5706 A0 workaround: restrict TX DMA to a single channel. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                val = REG_RD(bp, BNX2_TDMA_CONFIG);
                val |= BNX2_TDMA_CONFIG_ONE_DMA;
                REG_WR(bp, BNX2_TDMA_CONFIG, val);
        }

        /* On PCI-X, disable relaxed ordering in the PCI-X command word. */
        if (bp->flags & PCIX_FLAG) {
                u16 val16;

                pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                     &val16);
                pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
                                      val16 & ~PCI_X_CMD_ERO);
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
               BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
               BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

        /* Initialize context mapping and zero out the quick contexts.  The
         * context block must have already been enabled. */
        bnx2_init_context(bp);

        /* Download and start the on-chip processor firmware. */
        if ((rc = bnx2_init_cpus(bp)) != 0)
                return rc;

        bnx2_init_nvram(bp);

        bnx2_set_mac_addr(bp);

        /* Kernel-bypass block size for the mailbox queue. */
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
        REG_WR(bp, BNX2_MQ_CONFIG, val);

        /* Kernel mailbox window ends where the bypass window starts. */
        val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
        REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
        REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

        /* Tell RV2P the host page size (encoded as log2(page) - 8). */
        val = (BCM_PAGE_BITS - 8) << 24;
        REG_WR(bp, BNX2_RV2P_CONFIG, val);

        /* Configure page size. */
        val = REG_RD(bp, BNX2_TBDR_CONFIG);
        val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
        val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
        REG_WR(bp, BNX2_TBDR_CONFIG, val);

        /* Seed the EMAC backoff generator from the MAC address so that
         * multiple ports don't use identical backoff sequences. */
        val = bp->mac_addr[0] +
              (bp->mac_addr[1] << 8) +
              (bp->mac_addr[2] << 16) +
              bp->mac_addr[3] +
              (bp->mac_addr[4] << 8) +
              (bp->mac_addr[5] << 16);
        REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

        /* Program the MTU.  Also include 4 bytes for CRC32. */
        val = bp->dev->mtu + ETH_HLEN + 4;
        if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
                val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
        REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

        bp->last_status_idx = 0;
        bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

        /* Set up how to generate a link change interrupt. */
        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* DMA addresses of the host status and statistics blocks. */
        REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
               (u64) bp->status_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
               (u64) bp->stats_blk_mapping & 0xffffffff);
        REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
               (u64) bp->stats_blk_mapping >> 32);

        /* Host-coalescing thresholds: low 16 bits apply during polling,
         * high 16 bits while interrupts are active. */
        REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
               (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
               (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

        REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
               (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

        REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

        REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

        REG_WR(bp, BNX2_HC_COM_TICKS,
               (bp->com_ticks_int << 16) | bp->com_ticks);

        REG_WR(bp, BNX2_HC_CMD_TICKS,
               (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

        REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
        REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

        /* 5706 A1 errata: timer modes cannot be used; stats only. */
        if (CHIP_ID(bp) == CHIP_ID_5706_A1)
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
        else {
                REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
                       BNX2_HC_CONFIG_TX_TMR_MODE |
                       BNX2_HC_CONFIG_COLLECT_STATS);
        }

        /* Clear internal stats counters. */
        REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

        REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

        /* Remember whether the bootcode reports ASF management enabled. */
        if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
            BNX2_PORT_FEATURE_ASF_ENABLED)
                bp->flags |= ASF_ENABLE_FLAG;

        /* Initialize the receive filter. */
        bnx2_set_rx_mode(bp->dev);

        /* Tell the bootcode the driver has finished reset processing. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
                          0);

        /* Enable (almost) all internal blocks; readback flushes the write. */
        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
        REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

        udelay(20);

        /* Cache the HC command register for later COAL_NOW pokes. */
        bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

        return rc;
}
3512
3513
/* Reset the driver's TX ring bookkeeping and program the chip's TX
 * context with the ring's type and DMA address.  The last BD of the ring
 * page is a chain pointer back to the start of the ring.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
        struct tx_bd *txbd;
        u32 val;

        /* Wake the netdev queue once at least half the ring is free. */
        bp->tx_wake_thresh = bp->tx_ring_size / 2;

        /* Final BD chains back to the beginning of the ring. */
        txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

        txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

        /* Start with an empty ring. */
        bp->tx_prod = 0;
        bp->tx_cons = 0;
        bp->hw_tx_cons = 0;
        bp->tx_prod_bseq = 0;

        /* Mark the context as an L2 connection of the standard size. */
        val = BNX2_L2CTX_TYPE_TYPE_L2;
        val |= BNX2_L2CTX_TYPE_SIZE_L2;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

        val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
        /* NOTE(review): magic value in bits 16-23 — meaning not visible
         * in this file; presumably BD pre-read count. */
        val |= 8 << 16;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

        /* Give the chip the host DMA address of the TX BD ring. */
        val = (u64) bp->tx_desc_mapping >> 32;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

        val = (u64) bp->tx_desc_mapping & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3546
/* Reset the driver's RX ring bookkeeping, initialize every RX BD, chain
 * the ring pages into a circular list, program the chip's RX context,
 * and pre-fill the ring with receive skbs.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* 8 for alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + 8;

        /* Start with an empty ring. */
        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;

        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                /* The last BD of each page is a chain pointer to the next
                 * page; the final page wraps back to page 0. */
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        /* Mark the RX context as a BD-chain L2 connection. */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        /* NOTE(review): magic value in bits 8-15 — meaning not visible
         * in this file. */
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        /* Give the chip the host DMA address of the first RX BD page. */
        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Fill the ring with skbs; stop early if allocation fails. */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Publish producer index and byte sequence to the chip mailbox. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3606
3607 static void
3608 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3609 {
3610         u32 num_rings, max;
3611
3612         bp->rx_ring_size = size;
3613         num_rings = 1;
3614         while (size > MAX_RX_DESC_CNT) {
3615                 size -= MAX_RX_DESC_CNT;
3616                 num_rings++;
3617         }
3618         /* round to next power of 2 */
3619         max = MAX_RX_RINGS;
3620         while ((max & num_rings) == 0)
3621                 max >>= 1;
3622
3623         if (num_rings != max)
3624                 max <<= 1;
3625
3626         bp->rx_max_ring = max;
3627         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3628 }
3629
3630 static void
3631 bnx2_free_tx_skbs(struct bnx2 *bp)
3632 {
3633         int i;
3634
3635         if (bp->tx_buf_ring == NULL)
3636                 return;
3637
3638         for (i = 0; i < TX_DESC_CNT; ) {
3639                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3640                 struct sk_buff *skb = tx_buf->skb;
3641                 int j, last;
3642
3643                 if (skb == NULL) {
3644                         i++;
3645                         continue;
3646                 }
3647
3648                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3649                         skb_headlen(skb), PCI_DMA_TODEVICE);
3650
3651                 tx_buf->skb = NULL;
3652
3653                 last = skb_shinfo(skb)->nr_frags;
3654                 for (j = 0; j < last; j++) {
3655                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3656                         pci_unmap_page(bp->pdev,
3657                                 pci_unmap_addr(tx_buf, mapping),
3658                                 skb_shinfo(skb)->frags[j].size,
3659                                 PCI_DMA_TODEVICE);
3660                 }
3661                 dev_kfree_skb(skb);
3662                 i += j + 1;
3663         }
3664
3665 }
3666
3667 static void
3668 bnx2_free_rx_skbs(struct bnx2 *bp)
3669 {
3670         int i;
3671
3672         if (bp->rx_buf_ring == NULL)
3673                 return;
3674
3675         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3676                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3677                 struct sk_buff *skb = rx_buf->skb;
3678
3679                 if (skb == NULL)
3680                         continue;
3681
3682                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3683                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3684
3685                 rx_buf->skb = NULL;
3686
3687                 dev_kfree_skb(skb);
3688         }
3689 }
3690
/* Release every skb still attached to the TX and RX rings (unmapping
 * their DMA addresses first).  Used when the rings are torn down, e.g.
 * on chip reset or device close. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
3697
3698 static int
3699 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3700 {
3701         int rc;
3702
3703         rc = bnx2_reset_chip(bp, reset_code);
3704         bnx2_free_skbs(bp);
3705         if (rc)
3706                 return rc;
3707
3708         if ((rc = bnx2_init_chip(bp)) != 0)
3709                 return rc;
3710
3711         bnx2_init_tx_ring(bp);
3712         bnx2_init_rx_ring(bp);
3713         return 0;
3714 }
3715
3716 static int
3717 bnx2_init_nic(struct bnx2 *bp)
3718 {
3719         int rc;
3720
3721         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3722                 return rc;
3723
3724         spin_lock_bh(&bp->phy_lock);
3725         bnx2_init_phy(bp);
3726         spin_unlock_bh(&bp->phy_lock);
3727         bnx2_set_link(bp);
3728         return 0;
3729 }
3730
/* Ethtool self-test: walk a table of chip registers and verify that the
 * read/write bits accept 0 and all-ones, and that the read-only bits are
 * unaffected by writes.  Each register is restored to its saved value
 * whether or not the test passes.  Returns 0 on success, -ENODEV on the
 * first mismatch.
 *
 * Table fields: offset (BAR-relative), flags (currently unused here),
 * rw_mask (bits that must be writable), ro_mask (bits that must be
 * read-only).  The 0xffff offset is the end-of-table sentinel.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i;
        static const struct {
                u16   offset;
                u16   flags;
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, 0, 0x00003f00, 0x00000000 },
                { 0x0418, 0, 0x00000000, 0xffffffff },
                { 0x041c, 0, 0x00000000, 0xffffffff },
                { 0x0420, 0, 0x00000000, 0x80ffffff },
                { 0x0424, 0, 0x00000000, 0x00000000 },
                { 0x0428, 0, 0x00000000, 0x00000001 },
                { 0x0450, 0, 0x00000000, 0x0000ffff },
                { 0x0454, 0, 0x00000000, 0xffffffff },
                { 0x0458, 0, 0x00000000, 0xffffffff },

                { 0x0808, 0, 0x00000000, 0xffffffff },
                { 0x0854, 0, 0x00000000, 0xffffffff },
                { 0x0868, 0, 0x00000000, 0x77777777 },
                { 0x086c, 0, 0x00000000, 0x77777777 },
                { 0x0870, 0, 0x00000000, 0x77777777 },
                { 0x0874, 0, 0x00000000, 0x77777777 },

                { 0x0c00, 0, 0x00000000, 0x00000001 },
                { 0x0c04, 0, 0x00000000, 0x03ff0001 },
                { 0x0c08, 0, 0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },
                { 0x500c, 0, 0xf800f800, 0x07ff07ff },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write 0: writable bits must read back 0, read-only
                 * bits must keep their saved value. */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-ones: writable bits must read back 1, read-
                 * only bits must still keep their saved value. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore the register before reporting the failure. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
3893
3894 static int
3895 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3896 {
3897         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3898                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3899         int i;
3900
3901         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3902                 u32 offset;
3903
3904                 for (offset = 0; offset < size; offset += 4) {
3905
3906                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3907
3908                         if (REG_RD_IND(bp, start + offset) !=
3909                                 test_pattern[i]) {
3910                                 return -ENODEV;
3911                         }
3912                 }
3913         }
3914         return 0;
3915 }
3916
3917 static int
3918 bnx2_test_memory(struct bnx2 *bp)
3919 {
3920         int ret = 0;
3921         int i;
3922         static const struct {
3923                 u32   offset;
3924                 u32   len;
3925         } mem_tbl[] = {
3926                 { 0x60000,  0x4000 },
3927                 { 0xa0000,  0x3000 },
3928                 { 0xe0000,  0x4000 },
3929                 { 0x120000, 0x4000 },
3930                 { 0x1a0000, 0x4000 },
3931                 { 0x160000, 0x4000 },
3932                 { 0xffffffff, 0    },
3933         };
3934
3935         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3936                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3937                         mem_tbl[i].len)) != 0) {
3938                         return ret;
3939                 }
3940         }
3941
3942         return ret;
3943 }
3944
#define BNX2_MAC_LOOPBACK       0
#define BNX2_PHY_LOOPBACK       1

/* Ethtool loopback self-test: put the chip in MAC or PHY loopback, send
 * one 1514-byte frame through the TX ring, and verify it comes back on
 * the RX ring intact (status index advanced, no frame errors, correct
 * length, byte-for-byte payload match).
 *
 * Returns 0 on success, -EINVAL for an unknown mode, -ENOMEM if the test
 * skb cannot be allocated, -ENODEV on any verification failure.
 *
 * NOTE(review): caller is expected to have reset/quiesced the NIC first
 * (see bnx2_test_loopback) — this function manipulates the live rings.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a max-size (non-jumbo) frame: our own MAC as destination,
         * zeroed src/ethertype, then a counting-byte payload pattern. */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->mac_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a coalescing pass (without an interrupt) so the status
         * block reflects the current RX consumer index. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Post the frame as a single start+end BD on the TX ring. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell. */
        REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
        REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

        /* Give the frame time to loop back, then refresh the status
         * block with another coalescing pass. */
        udelay(100);

        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The frame must have been fully transmitted... */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* ...and exactly num_pkts frames received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The l2_fhdr written by the chip precedes the frame data. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame if the chip flagged any receive errors. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: received length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the counting-byte payload survived intact. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4066
4067 #define BNX2_MAC_LOOPBACK_FAILED        1
4068 #define BNX2_PHY_LOOPBACK_FAILED        2
4069 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4070                                          BNX2_PHY_LOOPBACK_FAILED)
4071
4072 static int
4073 bnx2_test_loopback(struct bnx2 *bp)
4074 {
4075         int rc = 0;
4076
4077         if (!netif_running(bp->dev))
4078                 return BNX2_LOOPBACK_FAILED;
4079
4080         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4081         spin_lock_bh(&bp->phy_lock);
4082         bnx2_init_phy(bp);
4083         spin_unlock_bh(&bp->phy_lock);
4084         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4085                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4086         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4087                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4088         return rc;
4089 }
4090
4091 #define NVRAM_SIZE 0x200
4092 #define CRC32_RESIDUAL 0xdebb20e3
4093
4094 static int
4095 bnx2_test_nvram(struct bnx2 *bp)
4096 {
4097         u32 buf[NVRAM_SIZE / 4];
4098         u8 *data = (u8 *) buf;
4099         int rc = 0;
4100         u32 magic, csum;
4101
4102         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4103                 goto test_nvram_done;
4104
4105         magic = be32_to_cpu(buf[0]);
4106         if (magic != 0x669955aa) {
4107                 rc = -ENODEV;
4108                 goto test_nvram_done;
4109         }
4110
4111         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4112                 goto test_nvram_done;
4113
4114         csum = ether_crc_le(0x100, data);
4115         if (csum != CRC32_RESIDUAL) {
4116                 rc = -ENODEV;
4117                 goto test_nvram_done;
4118         }
4119
4120         csum = ether_crc_le(0x100, data + 0x100);
4121         if (csum != CRC32_RESIDUAL) {
4122                 rc = -ENODEV;
4123         }
4124
4125 test_nvram_done:
4126         return rc;
4127 }
4128
4129 static int
4130 bnx2_test_link(struct bnx2 *bp)
4131 {
4132         u32 bmsr;
4133
4134         spin_lock_bh(&bp->phy_lock);
4135         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4136         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4137         spin_unlock_bh(&bp->phy_lock);
4138
4139         if (bmsr & BMSR_LSTATUS) {
4140                 return 0;
4141         }
4142         return -ENODEV;
4143 }
4144
4145 static int
4146 bnx2_test_intr(struct bnx2 *bp)
4147 {
4148         int i;
4149         u16 status_idx;
4150
4151         if (!netif_running(bp->dev))
4152                 return -ENODEV;
4153
4154         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4155
4156         /* This register is not touched during run-time. */
4157         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4158         REG_RD(bp, BNX2_HC_COMMAND);
4159
4160         for (i = 0; i < 10; i++) {
4161                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4162                         status_idx) {
4163
4164                         break;
4165                 }
4166
4167                 msleep_interruptible(10);
4168         }
4169         if (i < 10)
4170                 return 0;
4171
4172         return -ENODEV;
4173 }
4174
/* Periodic (timer-context) SerDes handling for the 5706: implements
 * parallel detection.  When autoneg is enabled but the link is down and
 * the PHY reports signal without autoneg configs from the partner, force
 * 1000/full (the partner is presumed to be a forced-speed link).  When a
 * link was established this way and autoneg configs later appear,
 * re-enable autonegotiation.  Runs under bp->phy_lock.
 *
 * NOTE(review): registers 0x1c/0x17/0x15 appear to be vendor shadow/
 * expansion registers; bit meanings (0x10 "SIGNAL DETECT", 0x20
 * "CONFIG") are taken from the inline comments — verify against the
 * PHY datasheet.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        /* Hold off while a recently-started autoneg attempt settles. */
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, MII_BMCR, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* Read twice — presumably to clear a latched
                         * value and get the current state. */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner isn't autonegotiating: force
                                 * 1000/full and remember we did so. */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, MII_BMCR, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* Link came up via parallel detect; if the partner now
                 * sends autoneg configs, switch back to autoneg. */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4229
4230 static void
4231 bnx2_timer(unsigned long data)
4232 {
4233         struct bnx2 *bp = (struct bnx2 *) data;
4234         u32 msg;
4235
4236         if (!netif_running(bp->dev))
4237                 return;
4238
4239         if (atomic_read(&bp->intr_sem) != 0)
4240                 goto bnx2_restart_timer;
4241
4242         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4243         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4244
4245         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4246
4247         if ((bp->phy_flags & PHY_SERDES_FLAG) &&
4248             (CHIP_NUM(bp) == CHIP_NUM_5706))
4249                 bnx2_5706_serdes_timer(bp);
4250
4251 bnx2_restart_timer:
4252         mod_timer(&bp->timer, jiffies + bp->current_interval);
4253 }
4254
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
        /* Bring the interface up: power the chip, allocate rings, hook the
         * IRQ (preferring MSI when the chip revision supports it), init the
         * NIC, and verify MSI delivery actually works before committing to
         * it.  Returns 0 on success or a negative errno, with all resources
         * released on every failure path.
         */
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* 5706 A0/A1 parts don't get MSI; also honor the module param. */
        if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
                (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
                !disable_msi) {

                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
                                        dev);
                }
                else {
                        /* MSI enable failed; fall back to shared INTx. */
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                }
        }
        else {
                rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                /* Unwind in reverse order of acquisition. */
                free_irq(bp->pdev->irq, dev);
                if (bp->flags & USING_MSI_FLAG) {
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        /* Tear down MSI, re-init the chip, and retry on
                         * a shared INTx line.
                         */
                        bnx2_disable_int(bp);
                        free_irq(bp->pdev->irq, dev);
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;

                        rc = bnx2_init_nic(bp);

                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                /* Timer was armed above; stop it. */
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
4350
static void
bnx2_reset_task(void *data)
{
        /* Workqueue handler that fully reinitializes the NIC (scheduled by
         * bnx2_tx_timeout()).  bp->in_reset_task is a simple busy flag that
         * bnx2_close() polls so it never tears the device down while a
         * reset is mid-flight.
         */
        struct bnx2 *bp = data;

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* Hold interrupts off (intr_sem = 1) until netif_start re-enables
         * the data path.
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
4368
4369 static void
4370 bnx2_tx_timeout(struct net_device *dev)
4371 {
4372         struct bnx2 *bp = netdev_priv(dev);
4373
4374         /* This allows the netif to be shutdown gracefully before resetting */
4375         schedule_work(&bp->reset_task);
4376 }
4377
4378 #ifdef BCM_VLAN
4379 /* Called with rtnl_lock */
4380 static void
4381 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4382 {
4383         struct bnx2 *bp = netdev_priv(dev);
4384
4385         bnx2_netif_stop(bp);
4386
4387         bp->vlgrp = vlgrp;
4388         bnx2_set_rx_mode(dev);
4389
4390         bnx2_netif_start(bp);
4391 }
4392
4393 /* Called with rtnl_lock */
4394 static void
4395 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4396 {
4397         struct bnx2 *bp = netdev_priv(dev);
4398
4399         bnx2_netif_stop(bp);
4400
4401         if (bp->vlgrp)
4402                 bp->vlgrp->vlan_devices[vid] = NULL;
4403         bnx2_set_rx_mode(dev);
4404
4405         bnx2_netif_start(bp);
4406 }
4407 #endif
4408
4409 /* Called with netif_tx_lock.
4410  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4411  * netif_wake_queue().
4412  */
4413 static int
4414 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4415 {
4416         struct bnx2 *bp = netdev_priv(dev);
4417         dma_addr_t mapping;
4418         struct tx_bd *txbd;
4419         struct sw_bd *tx_buf;
4420         u32 len, vlan_tag_flags, last_frag, mss;
4421         u16 prod, ring_prod;
4422         int i;
4423
4424         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4425                 netif_stop_queue(dev);
4426                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4427                         dev->name);
4428
4429                 return NETDEV_TX_BUSY;
4430         }
4431         len = skb_headlen(skb);
4432         prod = bp->tx_prod;
4433         ring_prod = TX_RING_IDX(prod);
4434
4435         vlan_tag_flags = 0;
4436         if (skb->ip_summed == CHECKSUM_PARTIAL) {
4437                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4438         }
4439
4440         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4441                 vlan_tag_flags |=
4442                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4443         }
4444 #ifdef BCM_TSO
4445         if ((mss = skb_shinfo(skb)->gso_size) &&
4446                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4447                 u32 tcp_opt_len, ip_tcp_len;
4448
4449                 if (skb_header_cloned(skb) &&
4450                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4451                         dev_kfree_skb(skb);
4452                         return NETDEV_TX_OK;
4453                 }
4454
4455                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4456                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4457
4458                 tcp_opt_len = 0;
4459                 if (skb->h.th->doff > 5) {
4460                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4461                 }
4462                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4463
4464                 skb->nh.iph->check = 0;
4465                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4466                 skb->h.th->check =
4467                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4468                                             skb->nh.iph->daddr,
4469                                             0, IPPROTO_TCP, 0);
4470
4471                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4472                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4473                                 (tcp_opt_len >> 2)) << 8;
4474                 }
4475         }
4476         else
4477 #endif
4478         {
4479                 mss = 0;
4480         }
4481
4482         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4483
4484         tx_buf = &bp->tx_buf_ring[ring_prod];
4485         tx_buf->skb = skb;
4486         pci_unmap_addr_set(tx_buf, mapping, mapping);
4487
4488         txbd = &bp->tx_desc_ring[ring_prod];
4489
4490         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4491         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4492         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4493         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4494
4495         last_frag = skb_shinfo(skb)->nr_frags;
4496
4497         for (i = 0; i < last_frag; i++) {
4498                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4499
4500                 prod = NEXT_TX_BD(prod);
4501                 ring_prod = TX_RING_IDX(prod);
4502                 txbd = &bp->tx_desc_ring[ring_prod];
4503
4504                 len = frag->size;
4505                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4506                         len, PCI_DMA_TODEVICE);
4507                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4508                                 mapping, mapping);
4509
4510                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4511                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4512                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4513                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4514
4515         }
4516         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4517
4518         prod = NEXT_TX_BD(prod);
4519         bp->tx_prod_bseq += skb->len;
4520
4521         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4522         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4523
4524         mmiowb();
4525
4526         bp->tx_prod = prod;
4527         dev->trans_start = jiffies;
4528
4529         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4530                 netif_stop_queue(dev);
4531                 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4532                         netif_wake_queue(dev);
4533         }
4534
4535         return NETDEV_TX_OK;
4536 }
4537
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        /* Bring the interface down: wait out any in-flight reset task,
         * quiesce the data path, tell the firmware whether to keep WoL
         * armed, and release IRQ/MSI, buffers, and memory before dropping
         * the chip to D3hot.
         */
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.  Instead, poll the
         * in_reset_task flag until bnx2_reset_task() finishes.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Pick the firmware reset code that encodes the WoL policy. */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4573
/* Fold a 64-bit hardware counter (split into ctr_hi/ctr_lo) into an
 * unsigned long.  The whole expansion is parenthesized so the macro is
 * safe inside any surrounding expression (the old form expanded to a
 * bare `hi + lo`, a precedence hazard).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

/* On 32-bit kernels only the low word fits in unsigned long. */
#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4586
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        /* Map the hardware statistics block into the generic
         * net_device_stats counters.  Returns the (possibly stale)
         * cached stats if the stats block isn't allocated yet.
         */
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* Totals are the sum of unicast + multicast + broadcast. */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        /* Aggregate RX error count from the individual categories above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* 5706 and 5708 A0 don't report carrier sense errors reliably. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        /* Include drops counted by firmware (see bnx2_timer). */
        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
4662
4663 /* All ethtool functions called with rtnl_lock */
4664
4665 static int
4666 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4667 {
4668         struct bnx2 *bp = netdev_priv(dev);
4669
4670         cmd->supported = SUPPORTED_Autoneg;
4671         if (bp->phy_flags & PHY_SERDES_FLAG) {
4672                 cmd->supported |= SUPPORTED_1000baseT_Full |
4673                         SUPPORTED_FIBRE;
4674
4675                 cmd->port = PORT_FIBRE;
4676         }
4677         else {
4678                 cmd->supported |= SUPPORTED_10baseT_Half |
4679                         SUPPORTED_10baseT_Full |
4680                         SUPPORTED_100baseT_Half |
4681                         SUPPORTED_100baseT_Full |
4682                         SUPPORTED_1000baseT_Full |
4683                         SUPPORTED_TP;
4684
4685                 cmd->port = PORT_TP;
4686         }
4687
4688         cmd->advertising = bp->advertising;
4689
4690         if (bp->autoneg & AUTONEG_SPEED) {
4691                 cmd->autoneg = AUTONEG_ENABLE;
4692         }
4693         else {
4694                 cmd->autoneg = AUTONEG_DISABLE;
4695         }
4696
4697         if (netif_carrier_ok(dev)) {
4698                 cmd->speed = bp->line_speed;
4699                 cmd->duplex = bp->duplex;
4700         }
4701         else {
4702                 cmd->speed = -1;
4703                 cmd->duplex = -1;
4704         }
4705
4706         cmd->transceiver = XCVR_INTERNAL;
4707         cmd->phy_address = bp->phy_addr;
4708
4709         return 0;
4710 }
4711
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        /* Validate and apply an ethtool speed/duplex/autoneg request.
         * Work on local copies so nothing is committed to bp until all
         * validation has passed; returns -EINVAL for combinations the
         * hardware cannot do.
         */
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 speeds are copper-only. */
                        if (bp->phy_flags & PHY_SERDES_FLAG)
                                return -EINVAL;

                        advertising = cmd->advertising;

                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
                        advertising = cmd->advertising;
                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
                        /* Gigabit half-duplex is not supported. */
                        return -EINVAL;
                }
                else {
                        /* Anything else: advertise everything the port
                         * type supports.
                         */
                        if (bp->phy_flags & PHY_SERDES_FLAG) {
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        }
                        else {
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                        }
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        /* SerDes can only be forced to 1G or 2.5G full. */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                return -EINVAL;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                return -EINVAL;
                }
                else if (cmd->speed == SPEED_1000) {
                        /* Copper gigabit cannot be forced; must autoneg. */
                        return -EINVAL;
                }
                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        /* Validation passed; commit and renegotiate. */
        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        spin_lock_bh(&bp->phy_lock);

        bnx2_setup_phy(bp);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4787
4788 static void
4789 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4790 {
4791         struct bnx2 *bp = netdev_priv(dev);
4792
4793         strcpy(info->driver, DRV_MODULE_NAME);
4794         strcpy(info->version, DRV_MODULE_VERSION);
4795         strcpy(info->bus_info, pci_name(bp->pdev));
4796         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4797         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4798         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4799         info->fw_version[1] = info->fw_version[3] = '.';
4800         info->fw_version[5] = 0;
4801 }
4802
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
4810
4811 static void
4812 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4813 {
4814         u32 *p = _p, i, offset;
4815         u8 *orig_p = _p;
4816         struct bnx2 *bp = netdev_priv(dev);
4817         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4818                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4819                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4820                                  0x1040, 0x1048, 0x1080, 0x10a4,
4821                                  0x1400, 0x1490, 0x1498, 0x14f0,
4822                                  0x1500, 0x155c, 0x1580, 0x15dc,
4823                                  0x1600, 0x1658, 0x1680, 0x16d8,
4824                                  0x1800, 0x1820, 0x1840, 0x1854,
4825                                  0x1880, 0x1894, 0x1900, 0x1984,
4826                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4827                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4828                                  0x2000, 0x2030, 0x23c0, 0x2400,
4829                                  0x2800, 0x2820, 0x2830, 0x2850,
4830                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4831                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4832                                  0x4080, 0x4090, 0x43c0, 0x4458,
4833                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4834                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4835                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4836                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4837                                  0x6800, 0x6848, 0x684c, 0x6860,
4838                                  0x6888, 0x6910, 0x8000 };
4839
4840         regs->version = 0;
4841
4842         memset(p, 0, BNX2_REGDUMP_LEN);
4843
4844         if (!netif_running(bp->dev))
4845                 return;
4846
4847         i = 0;
4848         offset = reg_boundaries[0];
4849         p += offset;
4850         while (offset < BNX2_REGDUMP_LEN) {
4851                 *p++ = REG_RD(bp, offset);
4852                 offset += 4;
4853                 if (offset == reg_boundaries[i + 1]) {
4854                         offset = reg_boundaries[i + 2];
4855                         p = (u32 *) (orig_p + offset);
4856                         i += 2;
4857                 }
4858         }
4859 }
4860
4861 static void
4862 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4863 {
4864         struct bnx2 *bp = netdev_priv(dev);
4865
4866         if (bp->flags & NO_WOL_FLAG) {
4867                 wol->supported = 0;
4868                 wol->wolopts = 0;
4869         }
4870         else {
4871                 wol->supported = WAKE_MAGIC;
4872                 if (bp->wol)
4873                         wol->wolopts = WAKE_MAGIC;
4874                 else
4875                         wol->wolopts = 0;
4876         }
4877         memset(&wol->sopass, 0, sizeof(wol->sopass));
4878 }
4879
4880 static int
4881 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4882 {
4883         struct bnx2 *bp = netdev_priv(dev);
4884
4885         if (wol->wolopts & ~WAKE_MAGIC)
4886                 return -EINVAL;
4887
4888         if (wol->wolopts & WAKE_MAGIC) {
4889                 if (bp->flags & NO_WOL_FLAG)
4890                         return -EINVAL;
4891
4892                 bp->wol = 1;
4893         }
4894         else {
4895                 bp->wol = 0;
4896         }
4897         return 0;
4898 }
4899
static int
bnx2_nway_reset(struct net_device *dev)
{
        /* Restart link autonegotiation.  Only valid when autoneg is
         * enabled.  On SerDes ports the link is first forced down (via
         * loopback) so the peer notices the renegotiation; note the phy
         * lock is dropped across the msleep() and reacquired after.
         */
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);
                if (CHIP_NUM(bp) == CHIP_NUM_5706) {
                        /* Arm the SerDes autoneg timeout handled by
                         * bnx2_5706_serdes_timer().
                         */
                        bp->current_interval = SERDES_AN_TIMEOUT;
                        bp->serdes_an_pending = 1;
                        mod_timer(&bp->timer, jiffies + bp->current_interval);
                }
        }

        /* Clear loopback and kick off a fresh autonegotiation. */
        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4935
4936 static int
4937 bnx2_get_eeprom_len(struct net_device *dev)
4938 {
4939         struct bnx2 *bp = netdev_priv(dev);
4940
4941         if (bp->flash_info == NULL)
4942                 return 0;
4943
4944         return (int) bp->flash_size;
4945 }
4946
4947 static int
4948 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4949                 u8 *eebuf)
4950 {
4951         struct bnx2 *bp = netdev_priv(dev);
4952         int rc;
4953
4954         /* parameters already validated in ethtool_get_eeprom */
4955
4956         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4957
4958         return rc;
4959 }
4960
4961 static int
4962 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4963                 u8 *eebuf)
4964 {
4965         struct bnx2 *bp = netdev_priv(dev);
4966         int rc;
4967
4968         /* parameters already validated in ethtool_set_eeprom */
4969
4970         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4971
4972         return rc;
4973 }
4974
4975 static int
4976 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4977 {
4978         struct bnx2 *bp = netdev_priv(dev);
4979
4980         memset(coal, 0, sizeof(struct ethtool_coalesce));
4981
4982         coal->rx_coalesce_usecs = bp->rx_ticks;
4983         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4984         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4985         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4986
4987         coal->tx_coalesce_usecs = bp->tx_ticks;
4988         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4989         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4990         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4991
4992         coal->stats_block_coalesce_usecs = bp->stats_ticks;
4993
4994         return 0;
4995 }
4996
4997 static int
4998 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4999 {
5000         struct bnx2 *bp = netdev_priv(dev);
5001
5002         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5003         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5004
5005         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5006         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5007
5008         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5009         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5010
5011         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5012         if (bp->rx_quick_cons_trip_int > 0xff)
5013                 bp->rx_quick_cons_trip_int = 0xff;
5014
5015         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5016         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5017
5018         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5019         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5020
5021         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5022         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5023
5024         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5025         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5026                 0xff;
5027
5028         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5029         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5030         bp->stats_ticks &= 0xffff00;
5031
5032         if (netif_running(bp->dev)) {
5033                 bnx2_netif_stop(bp);
5034                 bnx2_init_nic(bp);
5035                 bnx2_netif_start(bp);
5036         }
5037
5038         return 0;
5039 }
5040
5041 static void
5042 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5043 {
5044         struct bnx2 *bp = netdev_priv(dev);
5045
5046         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5047         ering->rx_mini_max_pending = 0;
5048         ering->rx_jumbo_max_pending = 0;
5049
5050         ering->rx_pending = bp->rx_ring_size;
5051         ering->rx_mini_pending = 0;
5052         ering->rx_jumbo_pending = 0;
5053
5054         ering->tx_max_pending = MAX_TX_DESC_CNT;
5055         ering->tx_pending = bp->tx_ring_size;
5056 }
5057
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        /* Resize the RX/TX rings.  The TX ring must hold at least one
         * maximally-fragmented packet (> MAX_SKB_FRAGS descriptors).
         * NOTE(review): if bnx2_alloc_mem() fails after the old rings were
         * freed, this returns with the device stopped and unusable until
         * it is closed and reopened — confirm whether callers expect that.
         */
        struct bnx2 *bp = netdev_priv(dev);

        if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
                (ering->tx_pending > MAX_TX_DESC_CNT) ||
                (ering->tx_pending <= MAX_SKB_FRAGS)) {

                return -EINVAL;
        }
        /* Quiesce and release the old rings before resizing. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, ering->rx_pending);
        bp->tx_ring_size = ering->tx_pending;

        /* Reallocate and restart with the new sizes. */
        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (rc)
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
5091
5092 static void
5093 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5094 {
5095         struct bnx2 *bp = netdev_priv(dev);
5096
5097         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5098         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5099         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5100 }
5101
5102 static int
5103 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5104 {
5105         struct bnx2 *bp = netdev_priv(dev);
5106
5107         bp->req_flow_ctrl = 0;
5108         if (epause->rx_pause)
5109                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5110         if (epause->tx_pause)
5111                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5112
5113         if (epause->autoneg) {
5114                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5115         }
5116         else {
5117                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5118         }
5119
5120         spin_lock_bh(&bp->phy_lock);
5121
5122         bnx2_setup_phy(bp);
5123
5124         spin_unlock_bh(&bp->phy_lock);
5125
5126         return 0;
5127 }
5128
5129 static u32
5130 bnx2_get_rx_csum(struct net_device *dev)
5131 {
5132         struct bnx2 *bp = netdev_priv(dev);
5133
5134         return bp->rx_csum;
5135 }
5136
5137 static int
5138 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5139 {
5140         struct bnx2 *bp = netdev_priv(dev);
5141
5142         bp->rx_csum = data;
5143         return 0;
5144 }
5145
5146 static int
5147 bnx2_set_tso(struct net_device *dev, u32 data)
5148 {
5149         if (data)
5150                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5151         else
5152                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5153         return 0;
5154 }
5155
#define BNX2_NUM_STATS 46

/* Names reported via ethtool -S.  The entries here must stay in exact
 * positional lockstep with the corresponding statistics-block offset
 * tables (bnx2_stats_offset_arr below); do not reorder one without
 * the others.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_discards" },
        { "rx_fw_discards" },
};
5208
/* 32-bit word offset of a field within the hardware statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offsets into struct statistics_block, parallel to
 * bnx2_stats_str_arr.  The *_hi entries point at the high 32-bit word
 * of a 64-bit counter; the low word is read from the next offset.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5259
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (0 = counter skipped) for 5706 A0-A2 and
 * 5708 A0; indexed in lockstep with bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5270
/* Per-counter width in bytes for later revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5278
#define BNX2_NUM_TESTS 6

/* Self-test names reported via "ethtool -t"; order must match the
 * buf[] indices filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5291
5292 static int
5293 bnx2_self_test_count(struct net_device *dev)
5294 {
5295         return BNX2_NUM_TESTS;
5296 }
5297
/* ethtool self-test handler.  Offline tests (registers, memory,
 * loopback) reset the chip and free all buffers, so they run only when
 * ETH_TEST_FL_OFFLINE is requested; online tests (nvram, interrupt,
 * link) always run.  A non-zero buf[i] marks test i as failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the NIC and put the chip in diagnostic mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			/* Interface was up: bring the NIC back online
			 * after the destructive tests.
			 */
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up (up to ~7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5353
5354 static void
5355 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5356 {
5357         switch (stringset) {
5358         case ETH_SS_STATS:
5359                 memcpy(buf, bnx2_stats_str_arr,
5360                         sizeof(bnx2_stats_str_arr));
5361                 break;
5362         case ETH_SS_TEST:
5363                 memcpy(buf, bnx2_tests_str_arr,
5364                         sizeof(bnx2_tests_str_arr));
5365                 break;
5366         }
5367 }
5368
5369 static int
5370 bnx2_get_stats_count(struct net_device *dev)
5371 {
5372         return BNX2_NUM_STATS;
5373 }
5374
5375 static void
5376 bnx2_get_ethtool_stats(struct net_device *dev,
5377                 struct ethtool_stats *stats, u64 *buf)
5378 {
5379         struct bnx2 *bp = netdev_priv(dev);
5380         int i;
5381         u32 *hw_stats = (u32 *) bp->stats_blk;
5382         u8 *stats_len_arr = NULL;
5383
5384         if (hw_stats == NULL) {
5385                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5386                 return;
5387         }
5388
5389         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5390             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5391             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5392             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5393                 stats_len_arr = bnx2_5706_stats_len_arr;
5394         else
5395                 stats_len_arr = bnx2_5708_stats_len_arr;
5396
5397         for (i = 0; i < BNX2_NUM_STATS; i++) {
5398                 if (stats_len_arr[i] == 0) {
5399                         /* skip this counter */
5400                         buf[i] = 0;
5401                         continue;
5402                 }
5403                 if (stats_len_arr[i] == 4) {
5404                         /* 4-byte counter */
5405                         buf[i] = (u64)
5406                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5407                         continue;
5408                 }
5409                 /* 8-byte counter */
5410                 buf[i] = (((u64) *(hw_stats +
5411                                         bnx2_stats_offset_arr[i])) << 32) +
5412                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5413         }
5414 }
5415
/* ethtool phys_id handler: blink the port LED to identify the adapter.
 * Toggles the LED override bits every 500ms for 'data' seconds
 * (default 2 when data is 0), then restores the saved LED mode.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Save current LED config and switch to MAC-driven LED mode. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			/* even phase: LED off (override only) */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			/* odd phase: force all speed/traffic LEDs on */
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		/* Allow the user to interrupt a long blink sequence. */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5449
/* ethtool entry points.  Generic ethtool_op_* helpers are used where
 * the stack's default implementation is sufficient.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings           = bnx2_get_settings,
	.set_settings           = bnx2_set_settings,
	.get_drvinfo            = bnx2_get_drvinfo,
	.get_regs_len           = bnx2_get_regs_len,
	.get_regs               = bnx2_get_regs,
	.get_wol                = bnx2_get_wol,
	.set_wol                = bnx2_set_wol,
	.nway_reset             = bnx2_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = bnx2_get_eeprom_len,
	.get_eeprom             = bnx2_get_eeprom,
	.set_eeprom             = bnx2_set_eeprom,
	.get_coalesce           = bnx2_get_coalesce,
	.set_coalesce           = bnx2_set_coalesce,
	.get_ringparam          = bnx2_get_ringparam,
	.set_ringparam          = bnx2_set_ringparam,
	.get_pauseparam         = bnx2_get_pauseparam,
	.set_pauseparam         = bnx2_set_pauseparam,
	.get_rx_csum            = bnx2_get_rx_csum,
	.set_rx_csum            = bnx2_set_rx_csum,
	.get_tx_csum            = ethtool_op_get_tx_csum,
	.set_tx_csum            = ethtool_op_set_tx_csum,
	.get_sg                 = ethtool_op_get_sg,
	.set_sg                 = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso                = ethtool_op_get_tso,
	.set_tso                = bnx2_set_tso,
#endif
	.self_test_count        = bnx2_self_test_count,
	.self_test              = bnx2_self_test,
	.get_strings            = bnx2_get_strings,
	.phys_id                = bnx2_phys_id,
	.get_stats_count        = bnx2_get_stats_count,
	.get_ethtool_stats      = bnx2_get_ethtool_stats,
	.get_perm_addr          = ethtool_op_get_perm_addr,
};
5487
/* Called with rtnl_lock */
/* MII ioctl handler: supports reading the PHY address and reading or
 * writing raw PHY registers.  Writes require CAP_NET_ADMIN.
 * Returns the PHY access result or -EOPNOTSUPP for unknown commands.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* phy_lock serializes MDIO access with the driver's own
		 * PHY polling paths.
		 */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is a privileged operation. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5529
5530 /* Called with rtnl_lock */
5531 static int
5532 bnx2_change_mac_addr(struct net_device *dev, void *p)
5533 {
5534         struct sockaddr *addr = p;
5535         struct bnx2 *bp = netdev_priv(dev);
5536
5537         if (!is_valid_ether_addr(addr->sa_data))
5538                 return -EINVAL;
5539
5540         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5541         if (netif_running(dev))
5542                 bnx2_set_mac_addr(bp);
5543
5544         return 0;
5545 }
5546
5547 /* Called with rtnl_lock */
5548 static int
5549 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5550 {
5551         struct bnx2 *bp = netdev_priv(dev);
5552
5553         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5554                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5555                 return -EINVAL;
5556
5557         dev->mtu = new_mtu;
5558         if (netif_running(dev)) {
5559                 bnx2_netif_stop(bp);
5560
5561                 bnx2_init_nic(bp);
5562
5563                 bnx2_netif_start(bp);
5564         }
5565         return 0;
5566 }
5567
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: invoke the interrupt handler directly with the IRQ
 * line masked, for contexts where normal interrupt delivery is
 * unavailable (e.g. netconsole).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5579
5580 static int __devinit
5581 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5582 {
5583         struct bnx2 *bp;
5584         unsigned long mem_len;
5585         int rc;
5586         u32 reg;
5587
5588         SET_MODULE_OWNER(dev);
5589         SET_NETDEV_DEV(dev, &pdev->dev);
5590         bp = netdev_priv(dev);
5591
5592         bp->flags = 0;
5593         bp->phy_flags = 0;
5594
5595         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5596         rc = pci_enable_device(pdev);
5597         if (rc) {
5598                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5599                 goto err_out;
5600         }
5601
5602         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5603                 dev_err(&pdev->dev,
5604                         "Cannot find PCI device base address, aborting.\n");
5605                 rc = -ENODEV;
5606                 goto err_out_disable;
5607         }
5608
5609         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5610         if (rc) {
5611                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5612                 goto err_out_disable;
5613         }
5614
5615         pci_set_master(pdev);
5616
5617         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5618         if (bp->pm_cap == 0) {
5619                 dev_err(&pdev->dev,
5620                         "Cannot find power management capability, aborting.\n");
5621                 rc = -EIO;
5622                 goto err_out_release;
5623         }
5624
5625         bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5626         if (bp->pcix_cap == 0) {
5627                 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
5628                 rc = -EIO;
5629                 goto err_out_release;
5630         }
5631
5632         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5633                 bp->flags |= USING_DAC_FLAG;
5634                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5635                         dev_err(&pdev->dev,
5636                                 "pci_set_consistent_dma_mask failed, aborting.\n");
5637                         rc = -EIO;
5638                         goto err_out_release;
5639                 }
5640         }
5641         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5642                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5643                 rc = -EIO;
5644                 goto err_out_release;
5645         }
5646
5647         bp->dev = dev;
5648         bp->pdev = pdev;
5649
5650         spin_lock_init(&bp->phy_lock);
5651         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5652
5653         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5654         mem_len = MB_GET_CID_ADDR(17);
5655         dev->mem_end = dev->mem_start + mem_len;
5656         dev->irq = pdev->irq;
5657
5658         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5659
5660         if (!bp->regview) {
5661                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5662                 rc = -ENOMEM;
5663                 goto err_out_release;
5664         }
5665
5666         /* Configure byte swap and enable write to the reg_window registers.
5667          * Rely on CPU to do target byte swapping on big endian systems
5668          * The chip's target access swapping will not swap all accesses
5669          */
5670         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5671                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5672                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5673
5674         bnx2_set_power_state(bp, PCI_D0);
5675
5676         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5677
5678         /* Get bus information. */
5679         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5680         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5681                 u32 clkreg;
5682
5683                 bp->flags |= PCIX_FLAG;
5684
5685                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5686
5687                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5688                 switch (clkreg) {
5689                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5690                         bp->bus_speed_mhz = 133;
5691                         break;
5692
5693                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5694                         bp->bus_speed_mhz = 100;
5695                         break;
5696
5697                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5698                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5699                         bp->bus_speed_mhz = 66;
5700                         break;
5701
5702                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5703                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5704                         bp->bus_speed_mhz = 50;
5705                         break;
5706
5707                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5708                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5709                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5710                         bp->bus_speed_mhz = 33;
5711                         break;
5712                 }
5713         }
5714         else {
5715                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5716                         bp->bus_speed_mhz = 66;
5717                 else
5718                         bp->bus_speed_mhz = 33;
5719         }
5720
5721         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5722                 bp->flags |= PCI_32BIT_FLAG;
5723
5724         /* 5706A0 may falsely detect SERR and PERR. */
5725         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5726                 reg = REG_RD(bp, PCI_COMMAND);
5727                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5728                 REG_WR(bp, PCI_COMMAND, reg);
5729         }
5730         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5731                 !(bp->flags & PCIX_FLAG)) {
5732
5733                 dev_err(&pdev->dev,
5734                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
5735                 goto err_out_unmap;
5736         }
5737
5738         bnx2_init_nvram(bp);
5739
5740         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5741
5742         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5743             BNX2_SHM_HDR_SIGNATURE_SIG)
5744                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5745         else
5746                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5747
5748         /* Get the permanent MAC address.  First we need to make sure the
5749          * firmware is actually running.
5750          */
5751         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5752
5753         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5754             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5755                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5756                 rc = -ENODEV;
5757                 goto err_out_unmap;
5758         }
5759
5760         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5761
5762         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5763         bp->mac_addr[0] = (u8) (reg >> 8);
5764         bp->mac_addr[1] = (u8) reg;
5765
5766         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5767         bp->mac_addr[2] = (u8) (reg >> 24);
5768         bp->mac_addr[3] = (u8) (reg >> 16);
5769         bp->mac_addr[4] = (u8) (reg >> 8);
5770         bp->mac_addr[5] = (u8) reg;
5771
5772         bp->tx_ring_size = MAX_TX_DESC_CNT;
5773         bnx2_set_rx_ring_size(bp, 255);
5774
5775         bp->rx_csum = 1;
5776
5777         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5778
5779         bp->tx_quick_cons_trip_int = 20;
5780         bp->tx_quick_cons_trip = 20;
5781         bp->tx_ticks_int = 80;
5782         bp->tx_ticks = 80;
5783
5784         bp->rx_quick_cons_trip_int = 6;
5785         bp->rx_quick_cons_trip = 6;
5786         bp->rx_ticks_int = 18;
5787         bp->rx_ticks = 18;
5788
5789         bp->stats_ticks = 1000000 & 0xffff00;
5790
5791         bp->timer_interval =  HZ;
5792         bp->current_interval =  HZ;
5793
5794         bp->phy_addr = 1;
5795
5796         /* Disable WOL support if we are running on a SERDES chip. */
5797         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5798                 bp->phy_flags |= PHY_SERDES_FLAG;
5799                 bp->flags |= NO_WOL_FLAG;
5800                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5801                         bp->phy_addr = 2;
5802                         reg = REG_RD_IND(bp, bp->shmem_base +
5803                                          BNX2_SHARED_HW_CFG_CONFIG);
5804                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5805                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5806                 }
5807         }
5808
5809         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5810             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5811             (CHIP_ID(bp) == CHIP_ID_5708_B1))
5812                 bp->flags |= NO_WOL_FLAG;
5813
5814         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5815                 bp->tx_quick_cons_trip_int =
5816                         bp->tx_quick_cons_trip;
5817                 bp->tx_ticks_int = bp->tx_ticks;
5818                 bp->rx_quick_cons_trip_int =
5819                         bp->rx_quick_cons_trip;
5820                 bp->rx_ticks_int = bp->rx_ticks;
5821                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5822                 bp->com_ticks_int = bp->com_ticks;
5823                 bp->cmd_ticks_int = bp->cmd_ticks;
5824         }
5825
5826         /* Disable MSI on 5706 if AMD 8132 bridge is found.
5827          *
5828          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
5829          * with byte enables disabled on the unused 32-bit word.  This is legal
5830          * but causes problems on the AMD 8132 which will eventually stop
5831          * responding after a while.
5832          *
5833          * AMD believes this incompatibility is unique to the 5706, and
5834          * prefers to locally disable MSI rather than globally disabling it
5835          * using pci_msi_quirk.
5836          */
5837         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5838                 struct pci_dev *amd_8132 = NULL;
5839
5840                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5841                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
5842                                                   amd_8132))) {
5843                         u8 rev;
5844
5845                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5846                         if (rev >= 0x10 && rev <= 0x13) {
5847                                 disable_msi = 1;
5848                                 pci_dev_put(amd_8132);
5849                                 break;
5850                         }
5851                 }
5852         }
5853
5854         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5855         bp->req_line_speed = 0;
5856         if (bp->phy_flags & PHY_SERDES_FLAG) {
5857                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5858
5859                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5860                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5861                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5862                         bp->autoneg = 0;
5863                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5864                         bp->req_duplex = DUPLEX_FULL;
5865                 }
5866         }
5867         else {
5868                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5869         }
5870
5871         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5872
5873         init_timer(&bp->timer);
5874         bp->timer.expires = RUN_AT(bp->timer_interval);
5875         bp->timer.data = (unsigned long) bp;
5876         bp->timer.function = bnx2_timer;
5877
5878         return 0;
5879
5880 err_out_unmap:
5881         if (bp->regview) {
5882                 iounmap(bp->regview);
5883                 bp->regview = NULL;
5884         }
5885
5886 err_out_release:
5887         pci_release_regions(pdev);
5888
5889 err_out_disable:
5890         pci_disable_device(pdev);
5891         pci_set_drvdata(pdev, NULL);
5892
5893 err_out:
5894         return rc;
5895 }
5896
5897 static int __devinit
5898 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5899 {
5900         static int version_printed = 0;
5901         struct net_device *dev = NULL;
5902         struct bnx2 *bp;
5903         int rc, i;
5904
5905         if (version_printed++ == 0)
5906                 printk(KERN_INFO "%s", version);
5907
5908         /* dev zeroed in init_etherdev */
5909         dev = alloc_etherdev(sizeof(*bp));
5910
5911         if (!dev)
5912                 return -ENOMEM;
5913
5914         rc = bnx2_init_board(pdev, dev);
5915         if (rc < 0) {
5916                 free_netdev(dev);
5917                 return rc;
5918         }
5919
5920         dev->open = bnx2_open;
5921         dev->hard_start_xmit = bnx2_start_xmit;
5922         dev->stop = bnx2_close;
5923         dev->get_stats = bnx2_get_stats;
5924         dev->set_multicast_list = bnx2_set_rx_mode;
5925         dev->do_ioctl = bnx2_ioctl;
5926         dev->set_mac_address = bnx2_change_mac_addr;
5927         dev->change_mtu = bnx2_change_mtu;
5928         dev->tx_timeout = bnx2_tx_timeout;
5929         dev->watchdog_timeo = TX_TIMEOUT;
5930 #ifdef BCM_VLAN
5931         dev->vlan_rx_register = bnx2_vlan_rx_register;
5932         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5933 #endif
5934         dev->poll = bnx2_poll;
5935         dev->ethtool_ops = &bnx2_ethtool_ops;
5936         dev->weight = 64;
5937
5938         bp = netdev_priv(dev);
5939
5940 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5941         dev->poll_controller = poll_bnx2;
5942 #endif
5943
5944         if ((rc = register_netdev(dev))) {
5945                 dev_err(&pdev->dev, "Cannot register net device\n");
5946                 if (bp->regview)
5947                         iounmap(bp->regview);
5948                 pci_release_regions(pdev);
5949                 pci_disable_device(pdev);
5950                 pci_set_drvdata(pdev, NULL);
5951                 free_netdev(dev);
5952                 return rc;
5953         }
5954
5955         pci_set_drvdata(pdev, dev);
5956
5957         memcpy(dev->dev_addr, bp->mac_addr, 6);
5958         memcpy(dev->perm_addr, bp->mac_addr, 6);
5959         bp->name = board_info[ent->driver_data].name,
5960         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5961                 "IRQ %d, ",
5962                 dev->name,
5963                 bp->name,
5964                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5965                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5966                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5967                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5968                 bp->bus_speed_mhz,
5969                 dev->base_addr,
5970                 bp->pdev->irq);
5971
5972         printk("node addr ");
5973         for (i = 0; i < 6; i++)
5974                 printk("%2.2x", dev->dev_addr[i]);
5975         printk("\n");
5976
5977         dev->features |= NETIF_F_SG;
5978         if (bp->flags & USING_DAC_FLAG)
5979                 dev->features |= NETIF_F_HIGHDMA;
5980         dev->features |= NETIF_F_IP_CSUM;
5981 #ifdef BCM_VLAN
5982         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5983 #endif
5984 #ifdef BCM_TSO
5985         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5986 #endif
5987
5988         netif_carrier_off(bp->dev);
5989
5990         return 0;
5991 }
5992
5993 static void __devexit
5994 bnx2_remove_one(struct pci_dev *pdev)
5995 {
5996         struct net_device *dev = pci_get_drvdata(pdev);
5997         struct bnx2 *bp = netdev_priv(dev);
5998
5999         flush_scheduled_work();
6000
6001         unregister_netdev(dev);
6002
6003         if (bp->regview)
6004                 iounmap(bp->regview);
6005
6006         free_netdev(dev);
6007         pci_release_regions(pdev);
6008         pci_disable_device(pdev);
6009         pci_set_drvdata(pdev, NULL);
6010 }
6011
6012 static int
6013 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6014 {
6015         struct net_device *dev = pci_get_drvdata(pdev);
6016         struct bnx2 *bp = netdev_priv(dev);
6017         u32 reset_code;
6018
6019         if (!netif_running(dev))
6020                 return 0;
6021
6022         flush_scheduled_work();
6023         bnx2_netif_stop(bp);
6024         netif_device_detach(dev);
6025         del_timer_sync(&bp->timer);
6026         if (bp->flags & NO_WOL_FLAG)
6027                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6028         else if (bp->wol)
6029                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6030         else
6031                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6032         bnx2_reset_chip(bp, reset_code);
6033         bnx2_free_skbs(bp);
6034         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6035         return 0;
6036 }
6037
6038 static int
6039 bnx2_resume(struct pci_dev *pdev)
6040 {
6041         struct net_device *dev = pci_get_drvdata(pdev);
6042         struct bnx2 *bp = netdev_priv(dev);
6043
6044         if (!netif_running(dev))
6045                 return 0;
6046
6047         bnx2_set_power_state(bp, PCI_D0);
6048         netif_device_attach(dev);
6049         bnx2_init_nic(bp);
6050         bnx2_netif_start(bp);
6051         return 0;
6052 }
6053
6054 static struct pci_driver bnx2_pci_driver = {
6055         .name           = DRV_MODULE_NAME,
6056         .id_table       = bnx2_pci_tbl,
6057         .probe          = bnx2_init_one,
6058         .remove         = __devexit_p(bnx2_remove_one),
6059         .suspend        = bnx2_suspend,
6060         .resume         = bnx2_resume,
6061 };
6062
6063 static int __init bnx2_init(void)
6064 {
6065         return pci_register_driver(&bnx2_pci_driver);
6066 }
6067
6068 static void __exit bnx2_cleanup(void)
6069 {
6070         pci_unregister_driver(&bnx2_pci_driver);
6071 }
6072
/* Register the module's load/unload handlers with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);