/*
 * Imported from the Linux kernel tree: drivers/net/bnx2.c
 * (commit: "[BNX2]: Add 5709 init code.")
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
53
54 #include "bnx2.h"
55 #include "bnx2_fw.h"
56
#define DRV_MODULE_NAME		"bnx2"
#define PFX DRV_MODULE_NAME	": "
#define DRV_MODULE_VERSION	"1.4.45"
#define DRV_MODULE_RELDATE	"September 29, 2006"

/* Schedule something (x) jiffies from now. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

/* One-shot banner string, discarded after module init (__devinitdata). */
static const char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Module parameter: non-zero forces legacy INTx instead of MSI. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board index into board_info[]; also stored as driver_data in
 * bnx2_pci_tbl below, so the two must stay in sync.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
} board_t;
89
/* indexed by board_t, above */
static const struct {
	char *name;	/* human-readable board name */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI IDs handled by this driver.  The HP entries carry specific
 * subsystem IDs and are listed before the PCI_ANY_ID wildcard entries
 * for the same device so they match first.  driver_data is a board_t.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* NVRAM device table.  One entry per recognized flash/EEPROM strap
 * value, including placeholder "Expansion entry" slots for straps with
 * no known part.  See struct flash_spec in bnx2.h for the meaning of
 * each field (the trailing fields are buffered-flag, page bits, page
 * size, byte-address mask, total size and a printable name).
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
207
/* Export the PCI ID table so userspace tools can autoload this module. */
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211 {
212         u32 diff;
213
214         smp_mb();
215         diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216         if (diff > MAX_TX_DESC_CNT)
217                 diff = (diff & MAX_TX_DESC_CNT) - 1;
218         return (bp->tx_ring_size - diff);
219 }
220
/* Indirect register read through the PCI config window pair.
 * NOTE(review): no locking visible here -- presumably callers
 * serialize access to the shared window address register; confirm.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
227
/* Indirect register write through the PCI config window pair.
 * NOTE(review): same serialization assumption as bnx2_reg_rd_ind().
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
235 static void
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
237 {
238         offset += cid_addr;
239         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
240                 int i;
241
242                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
243                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
244                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
245                 for (i = 0; i < 5; i++) {
246                         u32 val;
247                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
248                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
249                                 break;
250                         udelay(5);
251                 }
252         } else {
253                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
254                 REG_WR(bp, BNX2_CTX_DATA, val);
255         }
256 }
257
/* Read PHY register @reg via the EMAC MDIO interface.
 *
 * If hardware auto-polling of the PHY is active
 * (PHY_INT_MODE_AUTO_POLLING_FLAG), it is disabled around the manual
 * transaction and re-enabled afterwards.
 *
 * Returns 0 with the register value in *val, or -EBUSY (*val = 0) if
 * the transaction does not complete within the poll loop (50 x 10us).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Temporarily turn off hardware PHY auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back -- presumably flushes the posted write. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read: PHY address, register, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for START_BUSY to clear, then latch the data bits. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Timed out: still busy after the full poll loop. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware PHY auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
314
/* Write @val to PHY register @reg via the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the
 * manual transaction when enabled.  Returns 0 on success or -EBUSY if
 * the transaction does not complete within the poll loop (50 x 10us).
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Temporarily turn off hardware PHY auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write: address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore hardware PHY auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
363
/* Mask chip interrupts.  The read-back of INT_ACK_CMD presumably
 * flushes the posted write so masking takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
371
/* Unmask chip interrupts.
 *
 * First ack up to last_status_idx with interrupts still masked, then
 * unmask with a second write.  The final HC_COMMAND write with
 * COAL_NOW asks the host coalescing block for an immediate update.
 * NOTE(review): two-step ack semantics inferred from the bit names;
 * confirm against the NetXtreme II programming documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
384
/* Mask interrupts and wait for any in-flight handler to finish.
 * intr_sem is incremented first (balanced by bnx2_netif_start);
 * presumably the ISR checks it -- not visible in this chunk.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
392
/* Quiesce the interface: mask+sync interrupts, stop NAPI polling and
 * the tx queue.  trans_start is refreshed so the netdev watchdog does
 * not declare a tx timeout while the queue is deliberately stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
403
/* Undo bnx2_netif_stop().  intr_sem balances the atomic_inc done in
 * bnx2_disable_int_sync(); only the call that brings it back to zero
 * re-enables the tx queue, NAPI polling and interrupts, so stop/start
 * pairs nest correctly.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
415
/* Release all DMA-coherent rings, the combined status/statistics
 * block and (5709) the context-memory pages.  Every pointer is
 * checked and cleared, so calling this on a partially-allocated bp
 * (e.g. from the bnx2_alloc_mem() error path) is safe.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* Context pages (only allocated for 5709 in bnx2_alloc_mem). */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation. */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);		/* kfree(NULL) is a no-op */
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);		/* vfree(NULL) is a no-op */
	bp->rx_buf_ring = NULL;
}
454
/* Allocate the tx/rx rings, the combined status+statistics block and,
 * on 5709, the context-memory pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem() before returning.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* rx_buf_ring can be large (rx_max_ring pages), hence vmalloc. */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Statistics block lives just past the cache-aligned status block. */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB of context memory, split into BCM_PAGE_SIZE pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
525
526 static void
527 bnx2_report_fw_link(struct bnx2 *bp)
528 {
529         u32 fw_link_status = 0;
530
531         if (bp->link_up) {
532                 u32 bmsr;
533
534                 switch (bp->line_speed) {
535                 case SPEED_10:
536                         if (bp->duplex == DUPLEX_HALF)
537                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
538                         else
539                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
540                         break;
541                 case SPEED_100:
542                         if (bp->duplex == DUPLEX_HALF)
543                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
544                         else
545                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
546                         break;
547                 case SPEED_1000:
548                         if (bp->duplex == DUPLEX_HALF)
549                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
550                         else
551                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
552                         break;
553                 case SPEED_2500:
554                         if (bp->duplex == DUPLEX_HALF)
555                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
556                         else
557                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
558                         break;
559                 }
560
561                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
562
563                 if (bp->autoneg) {
564                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
565
566                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
567                         bnx2_read_phy(bp, MII_BMSR, &bmsr);
568
569                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
570                             bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
571                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
572                         else
573                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
574                 }
575         }
576         else
577                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
578
579         REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
580 }
581
/* Log the link state and sync carrier + firmware link status.
 * The message is assembled from several printk fragments, so the
 * exact strings below are load-bearing for the final console line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		/* Flow control: "receive", "transmit", or both. */
		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	/* Keep the firmware's view of the link in sync. */
	bnx2_report_fw_link(bp);
}
616
/* Resolve bp->flow_ctrl (FLOW_CTRL_TX/RX bits) for the current link.
 *
 * If speed+pause autonegotiation is not fully enabled, the user's
 * requested setting is applied (full duplex only).  On 5708 SerDes
 * the already-resolved result is read from the PHY; otherwise the
 * outcome is computed from the local and partner pause advertisements
 * per IEEE 802.3 Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Autoneg off (or partial): honor the forced setting. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		/* 5708 SerDes PHY reports the resolved result directly. */
		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Map 1000BASE-X pause bits onto the copper bit layout
		 * so the resolution logic below can be shared.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
692
693 static int
694 bnx2_5708s_linkup(struct bnx2 *bp)
695 {
696         u32 val;
697
698         bp->link_up = 1;
699         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
700         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
701                 case BCM5708S_1000X_STAT1_SPEED_10:
702                         bp->line_speed = SPEED_10;
703                         break;
704                 case BCM5708S_1000X_STAT1_SPEED_100:
705                         bp->line_speed = SPEED_100;
706                         break;
707                 case BCM5708S_1000X_STAT1_SPEED_1G:
708                         bp->line_speed = SPEED_1000;
709                         break;
710                 case BCM5708S_1000X_STAT1_SPEED_2G5:
711                         bp->line_speed = SPEED_2500;
712                         break;
713         }
714         if (val & BCM5708S_1000X_STAT1_FD)
715                 bp->duplex = DUPLEX_FULL;
716         else
717                 bp->duplex = DUPLEX_HALF;
718
719         return 0;
720 }
721
722 static int
723 bnx2_5706s_linkup(struct bnx2 *bp)
724 {
725         u32 bmcr, local_adv, remote_adv, common;
726
727         bp->link_up = 1;
728         bp->line_speed = SPEED_1000;
729
730         bnx2_read_phy(bp, MII_BMCR, &bmcr);
731         if (bmcr & BMCR_FULLDPLX) {
732                 bp->duplex = DUPLEX_FULL;
733         }
734         else {
735                 bp->duplex = DUPLEX_HALF;
736         }
737
738         if (!(bmcr & BMCR_ANENABLE)) {
739                 return 0;
740         }
741
742         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
743         bnx2_read_phy(bp, MII_LPA, &remote_adv);
744
745         common = local_adv & remote_adv;
746         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
747
748                 if (common & ADVERTISE_1000XFULL) {
749                         bp->duplex = DUPLEX_FULL;
750                 }
751                 else {
752                         bp->duplex = DUPLEX_HALF;
753                 }
754         }
755
756         return 0;
757 }
758
/* Determine line speed and duplex for a copper PHY.
 *
 * With autoneg enabled, the highest common capability wins: 1000 is
 * checked first via CTRL1000/STAT1000, then 100/10 via
 * ADVERTISE/LPA.  No common capability clears link_up.  With autoneg
 * off, speed and duplex are taken directly from BMCR.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Partner 1000BASE-T bits in STAT1000 sit two bits above
		 * the local CTRL1000 bits; shift down before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match: fall back to 100/10 resolution. */
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat link as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: BMCR forced settings apply. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
824
/* Program the EMAC to match the current link parameters (bp->line_speed,
 * bp->duplex, bp->flow_ctrl, bp->link_up).  Called whenever the link
 * state or flow control settings have been (re)resolved.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Default slot time / IPG; gigabit half duplex gets the larger
         * value written below. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* 5706 has no dedicated 10M MII mode;
                                 * it falls through to plain MII. */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                /* No link: leave the port in GMII mode. */
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        return 0;
}
891
/* Re-resolve the link state (up/down, speed, duplex, flow control)
 * after a link interrupt or configuration change, report any
 * transition, and reprogram the MAC.  Always returns 0.
 * NOTE(review): callers appear to hold bp->phy_lock around this --
 * confirm against call sites outside this chunk.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback mode the link is always considered up. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        link_up = bp->link_up;

        /* BMSR link status is latched; read twice to get the current
         * state. */
        bnx2_read_phy(bp, MII_BMSR, &bmsr);
        bnx2_read_phy(bp, MII_BMSR, &bmsr);

        /* On 5706 SerDes, override the BMSR link bit with the EMAC's
         * own link status (apparently a workaround for the internal
         * SerDes). */
        if ((bp->phy_flags & PHY_SERDES_FLAG) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val;

                val = REG_RD(bp, BNX2_EMAC_STATUS);
                if (val & BNX2_EMAC_STATUS_LINK)
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex with the chip-specific helper. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                /* Link lost.  For autoneg SerDes, drop any forced 2.5G
                 * setting and make sure autoneg is re-enabled. */
                if ((bp->phy_flags & PHY_SERDES_FLAG) &&
                        (bp->autoneg & AUTONEG_SPEED)) {

                        u32 bmcr;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        bmcr &= ~BCM5708S_BMCR_FORCE_2500;
                        if (!(bmcr & BMCR_ANENABLE)) {
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANENABLE);
                        }
                }
                bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                bp->link_up = 0;
        }

        /* Only log/report when the state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
958
959 static int
960 bnx2_reset_phy(struct bnx2 *bp)
961 {
962         int i;
963         u32 reg;
964
965         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
966
967 #define PHY_RESET_MAX_WAIT 100
968         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
969                 udelay(10);
970
971                 bnx2_read_phy(bp, MII_BMCR, &reg);
972                 if (!(reg & BMCR_RESET)) {
973                         udelay(20);
974                         break;
975                 }
976         }
977         if (i == PHY_RESET_MAX_WAIT) {
978                 return -EBUSY;
979         }
980         return 0;
981 }
982
983 static u32
984 bnx2_phy_get_pause_adv(struct bnx2 *bp)
985 {
986         u32 adv = 0;
987
988         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
989                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
990
991                 if (bp->phy_flags & PHY_SERDES_FLAG) {
992                         adv = ADVERTISE_1000XPAUSE;
993                 }
994                 else {
995                         adv = ADVERTISE_PAUSE_CAP;
996                 }
997         }
998         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
999                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1000                         adv = ADVERTISE_1000XPSE_ASYM;
1001                 }
1002                 else {
1003                         adv = ADVERTISE_PAUSE_ASYM;
1004                 }
1005         }
1006         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1007                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1008                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1009                 }
1010                 else {
1011                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1012                 }
1013         }
1014         return adv;
1015 }
1016
/* Configure the SerDes PHY from bp->autoneg, bp->req_line_speed,
 * bp->req_duplex and bp->advertising.  Handles both forced-speed and
 * autonegotiated modes.  bp->phy_lock is held by the caller and is
 * dropped briefly around the msleep() below.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
        u32 adv, bmcr, up1;
        u32 new_adv = 0;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                /* Forced speed/duplex path. */
                u32 new_bmcr;
                int force_link_down = 0;

                bnx2_read_phy(bp, MII_ADVERTISE, &adv);
                adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

                bnx2_read_phy(bp, MII_BMCR, &bmcr);
                new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
                new_bmcr |= BMCR_SPEED1000;
                if (bp->req_line_speed == SPEED_2500) {
                        new_bmcr |= BCM5708S_BMCR_FORCE_2500;
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (!(up1 & BCM5708S_UP1_2G5)) {
                                /* Enabling 2.5G requires a link bounce
                                 * to take effect. */
                                up1 |= BCM5708S_UP1_2G5;
                                bnx2_write_phy(bp, BCM5708S_UP1, up1);
                                force_link_down = 1;
                        }
                } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
                        bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                        if (up1 & BCM5708S_UP1_2G5) {
                                up1 &= ~BCM5708S_UP1_2G5;
                                bnx2_write_phy(bp, BCM5708S_UP1, up1);
                                force_link_down = 1;
                        }
                }

                if (bp->req_duplex == DUPLEX_FULL) {
                        adv |= ADVERTISE_1000XFULL;
                        new_bmcr |= BMCR_FULLDPLX;
                }
                else {
                        adv |= ADVERTISE_1000XHALF;
                        new_bmcr &= ~BMCR_FULLDPLX;
                }
                if ((new_bmcr != bmcr) || (force_link_down)) {
                        /* Force a link down visible on the other side */
                        if (bp->link_up) {
                                bnx2_write_phy(bp, MII_ADVERTISE, adv &
                                               ~(ADVERTISE_1000XFULL |
                                                 ADVERTISE_1000XHALF));
                                bnx2_write_phy(bp, MII_BMCR, bmcr |
                                        BMCR_ANRESTART | BMCR_ANENABLE);

                                bp->link_up = 0;
                                netif_carrier_off(bp->dev);
                                bnx2_write_phy(bp, MII_BMCR, new_bmcr);
                                bnx2_report_link(bp);
                        }
                        bnx2_write_phy(bp, MII_ADVERTISE, adv);
                        bnx2_write_phy(bp, MII_BMCR, new_bmcr);
                }
                return 0;
        }

        /* Autonegotiation path. */
        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &up1);
                up1 |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, up1);
        }

        if (bp->advertising & ADVERTISED_1000baseT_Full)
                new_adv |= ADVERTISE_1000XFULL;

        new_adv |= bnx2_phy_get_pause_adv(bp);

        bnx2_read_phy(bp, MII_ADVERTISE, &adv);
        bnx2_read_phy(bp, MII_BMCR, &bmcr);

        bp->serdes_an_pending = 0;
        /* Restart autoneg only if the advertisement changed or autoneg
         * was disabled. */
        if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
                /* Force a link down visible on the other side */
                if (bp->link_up) {
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                        /* Drop phy_lock so others aren't blocked for
                         * the full 20ms. */
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(20);
                        spin_lock_bh(&bp->phy_lock);
                }

                bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
                bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
                        BMCR_ANENABLE);
                /* Speed up link-up time when the link partner
                 * does not autonegotiate which is very common
                 * in blade servers. Some blade servers use
                 * IPMI for keyboard input and it's important
                 * to minimize link disruptions. Autoneg. involves
                 * exchanging base pages plus 3 next pages and
                 * normally completes in about 120 msec.
                 */
                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        return 0;
}
1120
/* ethtool advertisement masks for the link modes this driver supports
 * on fibre (SerDes) and copper PHYs respectively. */
#define ETHTOOL_ALL_FIBRE_SPEED                                         \
        (ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED                                        \
        (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |            \
        ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |           \
        ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks covering all 10/100 modes (plus the
 * mandatory CSMA bit) and all 1000 modes. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
        ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1133
/* Configure the copper PHY from bp->autoneg, bp->advertising,
 * bp->req_line_speed and bp->req_duplex.  bp->phy_lock is held by the
 * caller and is dropped briefly around the msleep() below.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
        u32 bmcr;
        u32 new_bmcr;

        bnx2_read_phy(bp, MII_BMCR, &bmcr);

        if (bp->autoneg & AUTONEG_SPEED) {
                u32 adv_reg, adv1000_reg;
                u32 new_adv_reg = 0;
                u32 new_adv1000_reg = 0;

                /* Current 10/100 + pause advertisement. */
                bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
                adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
                        ADVERTISE_PAUSE_ASYM);

                /* Current 1000 Mbps advertisement. */
                bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
                adv1000_reg &= PHY_ALL_1000_SPEED;

                /* Translate ethtool advertising bits to MII bits. */
                if (bp->advertising & ADVERTISED_10baseT_Half)
                        new_adv_reg |= ADVERTISE_10HALF;
                if (bp->advertising & ADVERTISED_10baseT_Full)
                        new_adv_reg |= ADVERTISE_10FULL;
                if (bp->advertising & ADVERTISED_100baseT_Half)
                        new_adv_reg |= ADVERTISE_100HALF;
                if (bp->advertising & ADVERTISED_100baseT_Full)
                        new_adv_reg |= ADVERTISE_100FULL;
                if (bp->advertising & ADVERTISED_1000baseT_Full)
                        new_adv1000_reg |= ADVERTISE_1000FULL;

                new_adv_reg |= ADVERTISE_CSMA;

                new_adv_reg |= bnx2_phy_get_pause_adv(bp);

                /* Restart autoneg only when something changed or
                 * autoneg was disabled. */
                if ((adv1000_reg != new_adv1000_reg) ||
                        (adv_reg != new_adv_reg) ||
                        ((bmcr & BMCR_ANENABLE) == 0)) {

                        bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
                        bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
                        bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
                                BMCR_ANENABLE);
                }
                else if (bp->link_up) {
                        /* Flow ctrl may have changed from auto to forced */
                        /* or vice-versa. */

                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
                return 0;
        }

        /* Forced speed/duplex path. */
        new_bmcr = 0;
        if (bp->req_line_speed == SPEED_100) {
                new_bmcr |= BMCR_SPEED100;
        }
        if (bp->req_duplex == DUPLEX_FULL) {
                new_bmcr |= BMCR_FULLDPLX;
        }
        if (new_bmcr != bmcr) {
                u32 bmsr;

                /* BMSR link status is latched; read twice for the
                 * current state. */
                bnx2_read_phy(bp, MII_BMSR, &bmsr);
                bnx2_read_phy(bp, MII_BMSR, &bmsr);

                if (bmsr & BMSR_LSTATUS) {
                        /* Force link down */
                        bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                        spin_unlock_bh(&bp->phy_lock);
                        msleep(50);
                        spin_lock_bh(&bp->phy_lock);

                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                        bnx2_read_phy(bp, MII_BMSR, &bmsr);
                }

                bnx2_write_phy(bp, MII_BMCR, new_bmcr);

                /* Normally, the new speed is setup after the link has
                 * gone down and up again. In some cases, link will not go
                 * down so we need to set up the new speed here.
                 */
                if (bmsr & BMSR_LSTATUS) {
                        bp->line_speed = bp->req_line_speed;
                        bp->duplex = bp->req_duplex;
                        bnx2_resolve_flow_ctrl(bp);
                        bnx2_set_mac_link(bp);
                }
        }
        return 0;
}
1227
1228 static int
1229 bnx2_setup_phy(struct bnx2 *bp)
1230 {
1231         if (bp->loopback == MAC_LOOPBACK)
1232                 return 0;
1233
1234         if (bp->phy_flags & PHY_SERDES_FLAG) {
1235                 return (bnx2_setup_serdes_phy(bp));
1236         }
1237         else {
1238                 return (bnx2_setup_copper_phy(bp));
1239         }
1240 }
1241
/* One-time initialization of the 5708 SerDes PHY: select IEEE register
 * semantics in the digital block, enable fiber mode with autodetect and
 * parallel detect, optionally enable 2.5G, and apply board-specific TX
 * amplitude tuning from the shared hardware config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
        u32 val;

        /* DIGITAL 3 block: use IEEE-compliant register behavior, then
         * return to the DIGITAL block. */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
        bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
        val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

        bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
        val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
        bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

        if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
                bnx2_read_phy(bp, BCM5708S_UP1, &val);
                val |= BCM5708S_UP1_2G5;
                bnx2_write_phy(bp, BCM5708S_UP1, val);
        }

        /* Early 5708 revisions need a TX amplitude workaround. */
        if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
            (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
                /* increase tx signal amplitude */
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                               BCM5708S_BLK_ADDR_TX_MISC);
                bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
                val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
                bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
                bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
        }

        /* Board-specific TX control override from shared config. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
              BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

        if (val) {
                u32 is_backplane;

                is_backplane = REG_RD_IND(bp, bp->shmem_base +
                                          BNX2_SHARED_HW_CFG_CONFIG);
                /* Only applied on backplane designs. */
                if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_TX_MISC);
                        bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
                        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                                       BCM5708S_BLK_ADDR_DIG);
                }
        }
        return 0;
}
1295
/* One-time initialization of the 5706 SerDes PHY.  Clears the parallel
 * detect flag and programs packet-length handling through the PHY's
 * vendor registers 0x18/0x1c.  The exact bit semantics of those
 * registers are not visible here; the values follow the original
 * Broadcom code.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Standard MTU: clear the extended-length bits. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
1330
1331 static int
1332 bnx2_init_copper_phy(struct bnx2 *bp)
1333 {
1334         u32 val;
1335
1336         bp->phy_flags |= PHY_CRC_FIX_FLAG;
1337
1338         if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1339                 bnx2_write_phy(bp, 0x18, 0x0c00);
1340                 bnx2_write_phy(bp, 0x17, 0x000a);
1341                 bnx2_write_phy(bp, 0x15, 0x310b);
1342                 bnx2_write_phy(bp, 0x17, 0x201f);
1343                 bnx2_write_phy(bp, 0x15, 0x9506);
1344                 bnx2_write_phy(bp, 0x17, 0x401f);
1345                 bnx2_write_phy(bp, 0x15, 0x14e2);
1346                 bnx2_write_phy(bp, 0x18, 0x0400);
1347         }
1348
1349         if (bp->dev->mtu > 1500) {
1350                 /* Set extended packet length bit */
1351                 bnx2_write_phy(bp, 0x18, 0x7);
1352                 bnx2_read_phy(bp, 0x18, &val);
1353                 bnx2_write_phy(bp, 0x18, val | 0x4000);
1354
1355                 bnx2_read_phy(bp, 0x10, &val);
1356                 bnx2_write_phy(bp, 0x10, val | 0x1);
1357         }
1358         else {
1359                 bnx2_write_phy(bp, 0x18, 0x7);
1360                 bnx2_read_phy(bp, 0x18, &val);
1361                 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1362
1363                 bnx2_read_phy(bp, 0x10, &val);
1364                 bnx2_write_phy(bp, 0x10, val & ~0x1);
1365         }
1366
1367         /* ethernet@wirespeed */
1368         bnx2_write_phy(bp, 0x18, 0x7007);
1369         bnx2_read_phy(bp, 0x18, &val);
1370         bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1371         return 0;
1372 }
1373
1374
1375 static int
1376 bnx2_init_phy(struct bnx2 *bp)
1377 {
1378         u32 val;
1379         int rc = 0;
1380
1381         bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1382         bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1383
1384         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1385
1386         bnx2_reset_phy(bp);
1387
1388         bnx2_read_phy(bp, MII_PHYSID1, &val);
1389         bp->phy_id = val << 16;
1390         bnx2_read_phy(bp, MII_PHYSID2, &val);
1391         bp->phy_id |= val & 0xffff;
1392
1393         if (bp->phy_flags & PHY_SERDES_FLAG) {
1394                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1395                         rc = bnx2_init_5706s_phy(bp);
1396                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1397                         rc = bnx2_init_5708s_phy(bp);
1398         }
1399         else {
1400                 rc = bnx2_init_copper_phy(bp);
1401         }
1402
1403         bnx2_setup_phy(bp);
1404
1405         return rc;
1406 }
1407
1408 static int
1409 bnx2_set_mac_loopback(struct bnx2 *bp)
1410 {
1411         u32 mac_mode;
1412
1413         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1414         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1415         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1416         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1417         bp->link_up = 1;
1418         return 0;
1419 }
1420
/* Forward declaration; defined with the ethtool self-test code later
 * in the file. */
static int bnx2_test_link(struct bnx2 *);
1422
/* Put the PHY into loopback at 1000 Mbps full duplex for the ethtool
 * self-test, wait up to ~1s for bnx2_test_link() to report success,
 * then force the EMAC into GMII mode with loopback/force bits cleared.
 * Returns 0 on success or the error from the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        /* PHY accesses are serialized by phy_lock. */
        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll until the link test passes (up to 10 x 100ms). */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
1452
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and wait up to FW_ACK_TIME_OUT_MS for an ack.
 * @msg_data: message code/data; the driver sequence number is OR'ed in.
 * @silent:   suppress the timeout printk when non-zero.
 * Returns 0 on ack (or for WAIT0 messages), -EBUSY on ack timeout,
 * -EIO if the firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
        int i;
        u32 val;

        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

        /* wait for an acknowledgement. */
        for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

                /* Firmware echoes the sequence number when it has
                 * processed the message. */
                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require the full ack/status check. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                                            "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
1495
/* Program the 5709's host page table so the chip can reach the context
 * memory pages in bp->ctx_blk_mapping[].  Returns 0 on success or
 * -EBUSY if a page-table write request is not consumed in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* NOTE(review): the purpose of bit 12 is not visible in this
         * file -- value kept verbatim from the original code. */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        /* Encode the page size as (log2(page) - 8) in bits 16+. */
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                /* Low 32 bits of the page's DMA address + valid bit. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                /* High 32 bits of the DMA address. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for the write request to be consumed. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
1529
1530 static void
1531 bnx2_init_context(struct bnx2 *bp)
1532 {
1533         u32 vcid;
1534
1535         vcid = 96;
1536         while (vcid) {
1537                 u32 vcid_addr, pcid_addr, offset;
1538
1539                 vcid--;
1540
1541                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1542                         u32 new_vcid;
1543
1544                         vcid_addr = GET_PCID_ADDR(vcid);
1545                         if (vcid & 0x8) {
1546                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1547                         }
1548                         else {
1549                                 new_vcid = vcid;
1550                         }
1551                         pcid_addr = GET_PCID_ADDR(new_vcid);
1552                 }
1553                 else {
1554                         vcid_addr = GET_CID_ADDR(vcid);
1555                         pcid_addr = vcid_addr;
1556                 }
1557
1558                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1559                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1560
1561                 /* Zero out the context. */
1562                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1563                         CTX_WR(bp, 0x00, offset, 0);
1564                 }
1565
1566                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1567                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1568         }
1569 }
1570
1571 static int
1572 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1573 {
1574         u16 *good_mbuf;
1575         u32 good_mbuf_cnt;
1576         u32 val;
1577
1578         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1579         if (good_mbuf == NULL) {
1580                 printk(KERN_ERR PFX "Failed to allocate memory in "
1581                                     "bnx2_alloc_bad_rbuf\n");
1582                 return -ENOMEM;
1583         }
1584
1585         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1586                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1587
1588         good_mbuf_cnt = 0;
1589
1590         /* Allocate a bunch of mbufs and save the good ones in an array. */
1591         val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1592         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1593                 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1594
1595                 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1596
1597                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1598
1599                 /* The addresses with Bit 9 set are bad memory blocks. */
1600                 if (!(val & (1 << 9))) {
1601                         good_mbuf[good_mbuf_cnt] = (u16) val;
1602                         good_mbuf_cnt++;
1603                 }
1604
1605                 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1606         }
1607
1608         /* Free the good ones back to the mbuf pool thus discarding
1609          * all the bad ones. */
1610         while (good_mbuf_cnt) {
1611                 good_mbuf_cnt--;
1612
1613                 val = good_mbuf[good_mbuf_cnt];
1614                 val = (val << 9) | val | 1;
1615
1616                 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1617         }
1618         kfree(good_mbuf);
1619         return 0;
1620 }
1621
1622 static void
1623 bnx2_set_mac_addr(struct bnx2 *bp)
1624 {
1625         u32 val;
1626         u8 *mac_addr = bp->dev->dev_addr;
1627
1628         val = (mac_addr[0] << 8) | mac_addr[1];
1629
1630         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1631
1632         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1633                 (mac_addr[4] << 8) | mac_addr[5];
1634
1635         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1636 }
1637
/* Allocate and DMA-map one rx skb for ring slot @index, fill in the
 * corresponding rx buffer descriptor, and advance the producer byte
 * sequence.  Returns 0 on success, -ENOMEM if skb allocation fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data to a BNX2_RX_ALIGN boundary. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        /* Split the DMA address across the two 32-bit descriptor
         * words. */
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        bp->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
1668
1669 static void
1670 bnx2_phy_int(struct bnx2 *bp)
1671 {
1672         u32 new_link_state, old_link_state;
1673
1674         new_link_state = bp->status_blk->status_attn_bits &
1675                 STATUS_ATTN_BITS_LINK_STATE;
1676         old_link_state = bp->status_blk->status_attn_bits_ack &
1677                 STATUS_ATTN_BITS_LINK_STATE;
1678         if (new_link_state != old_link_state) {
1679                 if (new_link_state) {
1680                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1681                                 STATUS_ATTN_BITS_LINK_STATE);
1682                 }
1683                 else {
1684                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1685                                 STATUS_ATTN_BITS_LINK_STATE);
1686                 }
1687                 bnx2_set_link(bp);
1688         }
1689 }
1690
/* Reclaim TX descriptors the hardware has finished with.
 *
 * Walks the TX ring from the driver's consumer index to the hardware
 * consumer index in the status block, unmapping DMA buffers and
 * freeing skbs, then wakes the TX queue if it was stopped and enough
 * descriptors became free.  Runs from NAPI poll context.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last ring entry is not a normal BD (presumably the
	 * "next page" pointer -- confirm against ring setup); step
	 * over it when the hardware index lands there.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* If the packet's last BD has not completed yet,
			 * stop and retry on the next poll.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		/* Unmap the linear part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each page fragment; one BD per fragment. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Refresh the hardware index; more packets may have
		 * completed while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_queue_stopped(bp->dev)) &&
		     (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		/* Re-check under the TX lock to avoid racing with a
		 * concurrent queue stop in the transmit path.
		 */
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
1778
/* Recycle an rx skb whose data was not consumed: move it from the
 * consumer slot back onto the ring at the producer slot so the
 * hardware can refill it, reusing the existing DMA mapping.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the CPU-synced header area back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already in place. */
	if (cons == prod)
		return;

	/* Carry the DMA mapping and BD address over to the producer
	 * slot.
	 */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
1808
/* Receive up to @budget packets from the RX ring.
 *
 * For each completed buffer: small packets (when mtu > 1500) are
 * copied into a fresh skb and the original buffer recycled; otherwise
 * the buffer is unmapped and passed up the stack with a replacement
 * allocated in its place.  Error frames and allocation failures
 * recycle the buffer via bnx2_reuse_rx_skb().  Returns the number of
 * packets processed.  Runs from NAPI poll context.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* Skip the ring's last entry when the hardware index lands on
	 * it (presumably the "next page" BD -- confirm against ring
	 * setup).
	 */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame-header area for the CPU; the full
		 * buffer is unmapped later if we keep the skb.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the
		 * packet data.
		 */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* presumably strips FCS -- confirm */

		/* Recycle the buffer on any hardware-reported error. */
		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Original buffer goes back on the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement allocated: unmap this buffer and
			 * hand the skb up the stack.
			 */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement buffer (or bad frame): recycle
			 * and drop.
			 */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN tagged (0x8100 is
		 * the 802.1Q ethertype).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results when rx_csum is on. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
1958
1959 /* MSI ISR - The only difference between this and the INTx ISR
1960  * is that the MSI interrupt is always serviced.
1961  */
1962 static irqreturn_t
1963 bnx2_msi(int irq, void *dev_instance)
1964 {
1965         struct net_device *dev = dev_instance;
1966         struct bnx2 *bp = netdev_priv(dev);
1967
1968         prefetch(bp->status_blk);
1969         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1970                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1971                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1972
1973         /* Return here if interrupt is disabled. */
1974         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1975                 return IRQ_HANDLED;
1976
1977         netif_rx_schedule(dev);
1978
1979         return IRQ_HANDLED;
1980 }
1981
/* INTx interrupt handler.  The line may be shared, so the handler
 * first checks whether the status block index advanced or INTA is
 * asserted before claiming the interrupt.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2011
2012 static inline int
2013 bnx2_has_work(struct bnx2 *bp)
2014 {
2015         struct status_block *sblk = bp->status_blk;
2016
2017         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2018             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2019                 return 1;
2020
2021         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2022             bp->link_up)
2023                 return 1;
2024
2025         return 0;
2026 }
2027
/* NAPI poll handler.  Services a pending link attention, TX
 * completions, and up to *budget RX packets.  Returns 1 when more
 * work remains; returns 0 after completing the poll and re-enabling
 * interrupts.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Link attention pending (attn bit differs from its ack bit)? */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
		spin_unlock(&bp->phy_lock);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* read back to flush the write */
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Honor both the global budget and this device's quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* For INTx the ack is written twice: first with the
		 * interrupt still masked, then unmasked.
		 * NOTE(review): presumably this ordering avoids a
		 * spurious-interrupt window -- confirm against the
		 * PCICFG_INT_ACK_CMD register description.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2089
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's receive filters (promiscuous, all-multicast, or
 * the multicast hash) to match dev->flags and the multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags intact only when no vlan group is registered
	 * and ASF is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 bits: the
			 * low CRC byte selects register (top 3 bits)
			 * and bit position (low 5 bits).
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, reprogram, then re-enable the sort filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2164
2165 #define FW_BUF_SIZE     0x8000
2166
2167 static int
2168 bnx2_gunzip_init(struct bnx2 *bp)
2169 {
2170         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2171                 goto gunzip_nomem1;
2172
2173         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2174                 goto gunzip_nomem2;
2175
2176         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2177         if (bp->strm->workspace == NULL)
2178                 goto gunzip_nomem3;
2179
2180         return 0;
2181
2182 gunzip_nomem3:
2183         kfree(bp->strm);
2184         bp->strm = NULL;
2185
2186 gunzip_nomem2:
2187         vfree(bp->gunzip_buf);
2188         bp->gunzip_buf = NULL;
2189
2190 gunzip_nomem1:
2191         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2192                             "uncompression.\n", bp->dev->name);
2193         return -ENOMEM;
2194 }
2195
2196 static void
2197 bnx2_gunzip_end(struct bnx2 *bp)
2198 {
2199         kfree(bp->strm->workspace);
2200
2201         kfree(bp->strm);
2202         bp->strm = NULL;
2203
2204         if (bp->gunzip_buf) {
2205                 vfree(bp->gunzip_buf);
2206                 bp->gunzip_buf = NULL;
2207         }
2208 }
2209
/* Decompress a gzip-wrapped firmware image into bp->gunzip_buf.
 *
 * @zbuf/@len: gzip data (header followed by a raw deflate stream).
 * @outbuf/@outlen: on return, point at the decompressed data and its
 *	length.  The data lives in bp->gunzip_buf and stays valid only
 *	until the next call or bnx2_gunzip_end().
 *
 * Returns 0 on success, -EINVAL for a bad header, or a zlib error
 * code.
 */
static int
bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	/* Fixed gzip header is 10 bytes. */
	n = 10;

#define FNAME   0x8
	/* Skip the NUL-terminated original-filename field if present.
	 * NOTE(review): the other optional header fields (FEXTRA,
	 * FCOMMENT, FHCRC) are not handled -- presumably the embedded
	 * firmware images never set them; confirm if the firmware
	 * build process changes.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* Negative windowBits selects raw inflate; the gzip header was
	 * consumed above.
	 */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);

	*outlen = FW_BUF_SIZE - bp->strm->avail_out;
	*outbuf = bp->gunzip_buf;

	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
2250
/* Download microcode into one of the two RV2P processors.  Each
 * instruction is a 64-bit pair staged through the INSTR_HIGH /
 * INSTR_LOW registers and committed with an ADDR_CMD write, then the
 * processor is reset (it is un-stalled later in chip init).
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	/* rv2p_code_len is in bytes; 8 bytes per instruction. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		/* NOTE(review): cpu_to_le32() on the value handed to
		 * REG_WR looks intentional (code words stored
		 * little-endian) -- confirm against REG_WR semantics
		 * on big-endian hosts.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2283
2284 static int
2285 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2286 {
2287         u32 offset;
2288         u32 val;
2289         int rc;
2290
2291         /* Halt the CPU. */
2292         val = REG_RD_IND(bp, cpu_reg->mode);
2293         val |= cpu_reg->mode_value_halt;
2294         REG_WR_IND(bp, cpu_reg->mode, val);
2295         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2296
2297         /* Load the Text area. */
2298         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2299         if (fw->gz_text) {
2300                 u32 text_len;
2301                 void *text;
2302
2303                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2304                                  &text_len);
2305                 if (rc)
2306                         return rc;
2307
2308                 fw->text = text;
2309         }
2310         if (fw->gz_text) {
2311                 int j;
2312
2313                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2314                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2315                 }
2316         }
2317
2318         /* Load the Data area. */
2319         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2320         if (fw->data) {
2321                 int j;
2322
2323                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2324                         REG_WR_IND(bp, offset, fw->data[j]);
2325                 }
2326         }
2327
2328         /* Load the SBSS area. */
2329         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2330         if (fw->sbss) {
2331                 int j;
2332
2333                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2334                         REG_WR_IND(bp, offset, fw->sbss[j]);
2335                 }
2336         }
2337
2338         /* Load the BSS area. */
2339         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2340         if (fw->bss) {
2341                 int j;
2342
2343                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2344                         REG_WR_IND(bp, offset, fw->bss[j]);
2345                 }
2346         }
2347
2348         /* Load the Read-Only area. */
2349         offset = cpu_reg->spad_base +
2350                 (fw->rodata_addr - cpu_reg->mips_view_base);
2351         if (fw->rodata) {
2352                 int j;
2353
2354                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2355                         REG_WR_IND(bp, offset, fw->rodata[j]);
2356                 }
2357         }
2358
2359         /* Clear the pre-fetch instruction. */
2360         REG_WR_IND(bp, cpu_reg->inst, 0);
2361         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2362
2363         /* Start the CPU. */
2364         val = REG_RD_IND(bp, cpu_reg->mode);
2365         val &= ~cpu_reg->mode_value_halt;
2366         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2367         REG_WR_IND(bp, cpu_reg->mode, val);
2368
2369         return 0;
2370 }
2371
/* Load firmware into all on-chip processors (the two RV2P engines,
 * RX, TX, TX patch-up, and Completion processors) and start them.
 * Returns 0 on success or a decompression/load error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Scratch buffers for firmware decompression. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Success and error paths converge here to free the
	 * decompression buffers.
	 */
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
2483
2484 static int
2485 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2486 {
2487         u16 pmcsr;
2488
2489         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2490
2491         switch (state) {
2492         case PCI_D0: {
2493                 u32 val;
2494
2495                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2496                         (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2497                         PCI_PM_CTRL_PME_STATUS);
2498
2499                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2500                         /* delay required during transition out of D3hot */
2501                         msleep(20);
2502
2503                 val = REG_RD(bp, BNX2_EMAC_MODE);
2504                 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2505                 val &= ~BNX2_EMAC_MODE_MPKT;
2506                 REG_WR(bp, BNX2_EMAC_MODE, val);
2507
2508                 val = REG_RD(bp, BNX2_RPM_CONFIG);
2509                 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2510                 REG_WR(bp, BNX2_RPM_CONFIG, val);
2511                 break;
2512         }
2513         case PCI_D3hot: {
2514                 int i;
2515                 u32 val, wol_msg;
2516
2517                 if (bp->wol) {
2518                         u32 advertising;
2519                         u8 autoneg;
2520
2521                         autoneg = bp->autoneg;
2522                         advertising = bp->advertising;
2523
2524                         bp->autoneg = AUTONEG_SPEED;
2525                         bp->advertising = ADVERTISED_10baseT_Half |
2526                                 ADVERTISED_10baseT_Full |
2527                                 ADVERTISED_100baseT_Half |
2528                                 ADVERTISED_100baseT_Full |
2529                                 ADVERTISED_Autoneg;
2530
2531                         bnx2_setup_copper_phy(bp);
2532
2533                         bp->autoneg = autoneg;
2534                         bp->advertising = advertising;
2535
2536                         bnx2_set_mac_addr(bp);
2537
2538                         val = REG_RD(bp, BNX2_EMAC_MODE);
2539
2540                         /* Enable port mode. */
2541                         val &= ~BNX2_EMAC_MODE_PORT;
2542                         val |= BNX2_EMAC_MODE_PORT_MII |
2543                                BNX2_EMAC_MODE_MPKT_RCVD |
2544                                BNX2_EMAC_MODE_ACPI_RCVD |
2545                                BNX2_EMAC_MODE_MPKT;
2546
2547                         REG_WR(bp, BNX2_EMAC_MODE, val);
2548
2549                         /* receive all multicast */
2550                         for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2551                                 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2552                                        0xffffffff);
2553                         }
2554                         REG_WR(bp, BNX2_EMAC_RX_MODE,
2555                                BNX2_EMAC_RX_MODE_SORT_MODE);
2556
2557                         val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2558                               BNX2_RPM_SORT_USER0_MC_EN;
2559                         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2560                         REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2561                         REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2562                                BNX2_RPM_SORT_USER0_ENA);
2563
2564                         /* Need to enable EMAC and RPM for WOL. */
2565                         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2566                                BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2567                                BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2568                                BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2569
2570                         val = REG_RD(bp, BNX2_RPM_CONFIG);
2571                         val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2572                         REG_WR(bp, BNX2_RPM_CONFIG, val);
2573
2574                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2575                 }
2576                 else {
2577                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2578                 }
2579
2580                 if (!(bp->flags & NO_WOL_FLAG))
2581                         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2582
2583                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2584                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2585                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2586
2587                         if (bp->wol)
2588                                 pmcsr |= 3;
2589                 }
2590                 else {
2591                         pmcsr |= 3;
2592                 }
2593                 if (bp->wol) {
2594                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2595                 }
2596                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2597                                       pmcsr);
2598
2599                 /* No more memory access after this point until
2600                  * device is brought back to D0.
2601                  */
2602                 udelay(50);
2603                 break;
2604         }
2605         default:
2606                 return -EINVAL;
2607         }
2608         return 0;
2609 }
2610
2611 static int
2612 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2613 {
2614         u32 val;
2615         int j;
2616
2617         /* Request access to the flash interface. */
2618         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2619         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2620                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2621                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2622                         break;
2623
2624                 udelay(5);
2625         }
2626
2627         if (j >= NVRAM_TIMEOUT_COUNT)
2628                 return -EBUSY;
2629
2630         return 0;
2631 }
2632
2633 static int
2634 bnx2_release_nvram_lock(struct bnx2 *bp)
2635 {
2636         int j;
2637         u32 val;
2638
2639         /* Relinquish nvram interface. */
2640         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2641
2642         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2643                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2644                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2645                         break;
2646
2647                 udelay(5);
2648         }
2649
2650         if (j >= NVRAM_TIMEOUT_COUNT)
2651                 return -EBUSY;
2652
2653         return 0;
2654 }
2655
2656
2657 static int
2658 bnx2_enable_nvram_write(struct bnx2 *bp)
2659 {
2660         u32 val;
2661
2662         val = REG_RD(bp, BNX2_MISC_CFG);
2663         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2664
2665         if (!bp->flash_info->buffered) {
2666                 int j;
2667
2668                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2669                 REG_WR(bp, BNX2_NVM_COMMAND,
2670                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2671
2672                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2673                         udelay(5);
2674
2675                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2676                         if (val & BNX2_NVM_COMMAND_DONE)
2677                                 break;
2678                 }
2679
2680                 if (j >= NVRAM_TIMEOUT_COUNT)
2681                         return -EBUSY;
2682         }
2683         return 0;
2684 }
2685
2686 static void
2687 bnx2_disable_nvram_write(struct bnx2 *bp)
2688 {
2689         u32 val;
2690
2691         val = REG_RD(bp, BNX2_MISC_CFG);
2692         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2693 }
2694
2695
2696 static void
2697 bnx2_enable_nvram_access(struct bnx2 *bp)
2698 {
2699         u32 val;
2700
2701         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2702         /* Enable both bits, even on read. */
2703         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2704                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2705 }
2706
2707 static void
2708 bnx2_disable_nvram_access(struct bnx2 *bp)
2709 {
2710         u32 val;
2711
2712         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2713         /* Disable both bits, even after read. */
2714         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2715                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2716                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2717 }
2718
2719 static int
2720 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2721 {
2722         u32 cmd;
2723         int j;
2724
2725         if (bp->flash_info->buffered)
2726                 /* Buffered flash, no erase needed */
2727                 return 0;
2728
2729         /* Build an erase command */
2730         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2731               BNX2_NVM_COMMAND_DOIT;
2732
2733         /* Need to clear DONE bit separately. */
2734         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2735
2736         /* Address of the NVRAM to read from. */
2737         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2738
2739         /* Issue an erase command. */
2740         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2741
2742         /* Wait for completion. */
2743         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2744                 u32 val;
2745
2746                 udelay(5);
2747
2748                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2749                 if (val & BNX2_NVM_COMMAND_DONE)
2750                         break;
2751         }
2752
2753         if (j >= NVRAM_TIMEOUT_COUNT)
2754                 return -EBUSY;
2755
2756         return 0;
2757 }
2758
/* Read one 32-bit word of NVRAM at byte @offset into @ret_val (4 bytes,
 * stored in flash/big-endian byte order).  @cmd_flags supplies the
 * BNX2_NVM_COMMAND_FIRST/_LAST framing bits for multi-word transfers.
 * Caller must already hold the NVRAM lock and have access enabled.
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts address
	 * memory as (page number << page_bits) + offset within page, so the
	 * linear byte offset must be remapped. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
		           bp->flash_info->page_bits) +
		          (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Swap so ret_val holds the flash byte order on
			 * little-endian hosts (same operation as
			 * cpu_to_be32, which would be the clearer name). */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2804
2805
/* Write one 32-bit word (@val, 4 bytes in flash byte order) to NVRAM at
 * byte @offset.  @cmd_flags supplies the BNX2_NVM_COMMAND_FIRST/_LAST
 * framing bits.  Caller must hold the NVRAM lock with access and writes
 * enabled (see bnx2_enable_nvram_write()).
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts address
	 * memory as (page number << page_bits) + offset within page. */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
		          bp->flash_info->page_bits) +
		         (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Swap so the bytes land in flash order on little-endian hosts
	 * (mirrors the be32_to_cpu() in bnx2_nvram_read_dword()). */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
2849
/* Identify the attached flash/EEPROM from the NVM_CFG1 strapping and
 * point bp->flash_info at the matching flash_table[] entry.  If the
 * interface has not yet been reconfigured by software, also program
 * the NVM config registers for the detected part (under the NVRAM
 * lock).  Finally determine bp->flash_size, preferring the size in
 * shared memory over the table's total_size.
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field to compare against. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither loop broke out: j ran all the way to entry_count, so no
	 * table entry matched this strapping. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVRAM size reported in shared memory; fall back to
	 * the flash table's total_size when the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2927
2928 static int
2929 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2930                 int buf_size)
2931 {
2932         int rc = 0;
2933         u32 cmd_flags, offset32, len32, extra;
2934
2935         if (buf_size == 0)
2936                 return 0;
2937
2938         /* Request access to the flash interface. */
2939         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2940                 return rc;
2941
2942         /* Enable access to flash interface */
2943         bnx2_enable_nvram_access(bp);
2944
2945         len32 = buf_size;
2946         offset32 = offset;
2947         extra = 0;
2948
2949         cmd_flags = 0;
2950
2951         if (offset32 & 3) {
2952                 u8 buf[4];
2953                 u32 pre_len;
2954
2955                 offset32 &= ~3;
2956                 pre_len = 4 - (offset & 3);
2957
2958                 if (pre_len >= len32) {
2959                         pre_len = len32;
2960                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2961                                     BNX2_NVM_COMMAND_LAST;
2962                 }
2963                 else {
2964                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2965                 }
2966
2967                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2968
2969                 if (rc)
2970                         return rc;
2971
2972                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2973
2974                 offset32 += 4;
2975                 ret_buf += pre_len;
2976                 len32 -= pre_len;
2977         }
2978         if (len32 & 3) {
2979                 extra = 4 - (len32 & 3);
2980                 len32 = (len32 + 4) & ~3;
2981         }
2982
2983         if (len32 == 4) {
2984                 u8 buf[4];
2985
2986                 if (cmd_flags)
2987                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2988                 else
2989                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2990                                     BNX2_NVM_COMMAND_LAST;
2991
2992                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2993
2994                 memcpy(ret_buf, buf, 4 - extra);
2995         }
2996         else if (len32 > 0) {
2997                 u8 buf[4];
2998
2999                 /* Read the first word. */
3000                 if (cmd_flags)
3001                         cmd_flags = 0;
3002                 else
3003                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3004
3005                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3006
3007                 /* Advance to the next dword. */
3008                 offset32 += 4;
3009                 ret_buf += 4;
3010                 len32 -= 4;
3011
3012                 while (len32 > 4 && rc == 0) {
3013                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3014
3015                         /* Advance to the next dword. */
3016                         offset32 += 4;
3017                         ret_buf += 4;
3018                         len32 -= 4;
3019                 }
3020
3021                 if (rc)
3022                         return rc;
3023
3024                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3025                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3026
3027                 memcpy(ret_buf, buf, 4 - extra);
3028         }
3029
3030         /* Disable access to flash interface */
3031         bnx2_disable_nvram_access(bp);
3032
3033         bnx2_release_nvram_lock(bp);
3034
3035         return rc;
3036 }
3037
3038 static int
3039 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3040                 int buf_size)
3041 {
3042         u32 written, offset32, len32;
3043         u8 *buf, start[4], end[4], *flash_buffer = NULL;
3044         int rc = 0;
3045         int align_start, align_end;
3046
3047         buf = data_buf;
3048         offset32 = offset;
3049         len32 = buf_size;
3050         align_start = align_end = 0;
3051
3052         if ((align_start = (offset32 & 3))) {
3053                 offset32 &= ~3;
3054                 len32 += align_start;
3055                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3056                         return rc;
3057         }
3058
3059         if (len32 & 3) {
3060                 if ((len32 > 4) || !align_start) {
3061                         align_end = 4 - (len32 & 3);
3062                         len32 += align_end;
3063                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3064                                 end, 4))) {
3065                                 return rc;
3066                         }
3067                 }
3068         }
3069
3070         if (align_start || align_end) {
3071                 buf = kmalloc(len32, GFP_KERNEL);
3072                 if (buf == 0)
3073                         return -ENOMEM;
3074                 if (align_start) {
3075                         memcpy(buf, start, 4);
3076                 }
3077                 if (align_end) {
3078                         memcpy(buf + len32 - 4, end, 4);
3079                 }
3080                 memcpy(buf + align_start, data_buf, buf_size);
3081         }
3082
3083         if (bp->flash_info->buffered == 0) {
3084                 flash_buffer = kmalloc(264, GFP_KERNEL);
3085                 if (flash_buffer == NULL) {
3086                         rc = -ENOMEM;
3087                         goto nvram_write_end;
3088                 }
3089         }
3090
3091         written = 0;
3092         while ((written < len32) && (rc == 0)) {
3093                 u32 page_start, page_end, data_start, data_end;
3094                 u32 addr, cmd_flags;
3095                 int i;
3096
3097                 /* Find the page_start addr */
3098                 page_start = offset32 + written;
3099                 page_start -= (page_start % bp->flash_info->page_size);
3100                 /* Find the page_end addr */
3101                 page_end = page_start + bp->flash_info->page_size;
3102                 /* Find the data_start addr */
3103                 data_start = (written == 0) ? offset32 : page_start;
3104                 /* Find the data_end addr */
3105                 data_end = (page_end > offset32 + len32) ?
3106                         (offset32 + len32) : page_end;
3107
3108                 /* Request access to the flash interface. */
3109                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3110                         goto nvram_write_end;
3111
3112                 /* Enable access to flash interface */
3113                 bnx2_enable_nvram_access(bp);
3114
3115                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3116                 if (bp->flash_info->buffered == 0) {
3117                         int j;
3118
3119                         /* Read the whole page into the buffer
3120                          * (non-buffer flash only) */
3121                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3122                                 if (j == (bp->flash_info->page_size - 4)) {
3123                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3124                                 }
3125                                 rc = bnx2_nvram_read_dword(bp,
3126                                         page_start + j,
3127                                         &flash_buffer[j],
3128                                         cmd_flags);
3129
3130                                 if (rc)
3131                                         goto nvram_write_end;
3132
3133                                 cmd_flags = 0;
3134                         }
3135                 }
3136
3137                 /* Enable writes to flash interface (unlock write-protect) */
3138                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3139                         goto nvram_write_end;
3140
3141                 /* Erase the page */
3142                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3143                         goto nvram_write_end;
3144
3145                 /* Re-enable the write again for the actual write */
3146                 bnx2_enable_nvram_write(bp);
3147
3148                 /* Loop to write back the buffer data from page_start to
3149                  * data_start */
3150                 i = 0;
3151                 if (bp->flash_info->buffered == 0) {
3152                         for (addr = page_start; addr < data_start;
3153                                 addr += 4, i += 4) {
3154
3155                                 rc = bnx2_nvram_write_dword(bp, addr,
3156                                         &flash_buffer[i], cmd_flags);
3157
3158                                 if (rc != 0)
3159                                         goto nvram_write_end;
3160
3161                                 cmd_flags = 0;
3162                         }
3163                 }
3164
3165                 /* Loop to write the new data from data_start to data_end */
3166                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3167                         if ((addr == page_end - 4) ||
3168                                 ((bp->flash_info->buffered) &&
3169                                  (addr == data_end - 4))) {
3170
3171                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3172                         }
3173                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3174                                 cmd_flags);
3175
3176                         if (rc != 0)
3177                                 goto nvram_write_end;
3178
3179                         cmd_flags = 0;
3180                         buf += 4;
3181                 }
3182
3183                 /* Loop to write back the buffer data from data_end
3184                  * to page_end */
3185                 if (bp->flash_info->buffered == 0) {
3186                         for (addr = data_end; addr < page_end;
3187                                 addr += 4, i += 4) {
3188
3189                                 if (addr == page_end-4) {
3190                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3191                                 }
3192                                 rc = bnx2_nvram_write_dword(bp, addr,
3193                                         &flash_buffer[i], cmd_flags);
3194
3195                                 if (rc != 0)
3196                                         goto nvram_write_end;
3197
3198                                 cmd_flags = 0;
3199                         }
3200                 }
3201
3202                 /* Disable writes to flash interface (lock write-protect) */
3203                 bnx2_disable_nvram_write(bp);
3204
3205                 /* Disable access to flash interface */
3206                 bnx2_disable_nvram_access(bp);
3207                 bnx2_release_nvram_lock(bp);
3208
3209                 /* Increment written */
3210                 written += data_end - data_start;
3211         }
3212
3213 nvram_write_end:
3214         if (bp->flash_info->buffered == 0)
3215                 kfree(flash_buffer);
3216
3217         if (align_start || align_end)
3218                 kfree(buf);
3219         return rc;
3220 }
3221
/* Perform a core reset of the chip.  Quiesces DMA, synchronizes with
 * the bootcode firmware before and after the reset, triggers the reset
 * through PCICFG_MISC_CONFIG, and verifies the resulting endian
 * configuration.  @reset_code is the BNX2_DRV_MSG_CODE_* reason passed
 * to the firmware.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the write before delaying. */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1: wait longer before polling for reset completion. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Still busy after the polling window above: give up. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured.  The diag
	 * register must read back as 0x01020304 after reset. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3300
3301 static int
3302 bnx2_init_chip(struct bnx2 *bp)
3303 {
3304         u32 val;
3305         int rc;
3306
3307         /* Make sure the interrupt is not active. */
3308         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3309
3310         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3311               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3312 #ifdef __BIG_ENDIAN
3313               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3314 #endif
3315               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3316               DMA_READ_CHANS << 12 |
3317               DMA_WRITE_CHANS << 16;
3318
3319         val |= (0x2 << 20) | (1 << 11);
3320
3321         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3322                 val |= (1 << 23);
3323
3324         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3325             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3326                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3327
3328         REG_WR(bp, BNX2_DMA_CONFIG, val);
3329
3330         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3331                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3332                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3333                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3334         }
3335
3336         if (bp->flags & PCIX_FLAG) {
3337                 u16 val16;
3338
3339                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3340                                      &val16);
3341                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3342                                       val16 & ~PCI_X_CMD_ERO);
3343         }
3344
3345         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3346                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3347                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3348                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3349
3350         /* Initialize context mapping and zero out the quick contexts.  The
3351          * context block must have already been enabled. */
3352         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3353                 bnx2_init_5709_context(bp);
3354         else
3355                 bnx2_init_context(bp);
3356
3357         if ((rc = bnx2_init_cpus(bp)) != 0)
3358                 return rc;
3359
3360         bnx2_init_nvram(bp);
3361
3362         bnx2_set_mac_addr(bp);
3363
3364         val = REG_RD(bp, BNX2_MQ_CONFIG);
3365         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3366         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3367         REG_WR(bp, BNX2_MQ_CONFIG, val);
3368
3369         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3370         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3371         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3372
3373         val = (BCM_PAGE_BITS - 8) << 24;
3374         REG_WR(bp, BNX2_RV2P_CONFIG, val);
3375
3376         /* Configure page size. */
3377         val = REG_RD(bp, BNX2_TBDR_CONFIG);
3378         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3379         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3380         REG_WR(bp, BNX2_TBDR_CONFIG, val);
3381
3382         val = bp->mac_addr[0] +
3383               (bp->mac_addr[1] << 8) +
3384               (bp->mac_addr[2] << 16) +
3385               bp->mac_addr[3] +
3386               (bp->mac_addr[4] << 8) +
3387               (bp->mac_addr[5] << 16);
3388         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3389
3390         /* Program the MTU.  Also include 4 bytes for CRC32. */
3391         val = bp->dev->mtu + ETH_HLEN + 4;
3392         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3393                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3394         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3395
3396         bp->last_status_idx = 0;
3397         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3398
3399         /* Set up how to generate a link change interrupt. */
3400         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3401
3402         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3403                (u64) bp->status_blk_mapping & 0xffffffff);
3404         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3405
3406         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3407                (u64) bp->stats_blk_mapping & 0xffffffff);
3408         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3409                (u64) bp->stats_blk_mapping >> 32);
3410
3411         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3412                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3413
3414         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3415                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3416
3417         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3418                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3419
3420         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3421
3422         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3423
3424         REG_WR(bp, BNX2_HC_COM_TICKS,
3425                (bp->com_ticks_int << 16) | bp->com_ticks);
3426
3427         REG_WR(bp, BNX2_HC_CMD_TICKS,
3428                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3429
3430         REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3431         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3432
3433         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3434                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3435         else {
3436                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3437                        BNX2_HC_CONFIG_TX_TMR_MODE |
3438                        BNX2_HC_CONFIG_COLLECT_STATS);
3439         }
3440
3441         /* Clear internal stats counters. */
3442         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3443
3444         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3445
3446         if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3447             BNX2_PORT_FEATURE_ASF_ENABLED)
3448                 bp->flags |= ASF_ENABLE_FLAG;
3449
3450         /* Initialize the receive filter. */
3451         bnx2_set_rx_mode(bp->dev);
3452
3453         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3454                           0);
3455
3456         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3457         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3458
3459         udelay(20);
3460
3461         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3462
3463         return rc;
3464 }
3465
/* Program the TX L2 context for connection ID @cid: context type/size,
 * command type, and the 64-bit DMA address of the TX BD ring split into
 * high/low halves.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, offset0, offset1, offset2, offset3;

	/* The 5709 uses a different L2 context layout; select the _XI
	 * register offsets for that chip, the legacy offsets otherwise.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	/* Mark the context as an L2 connection of the L2 size class. */
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);

	/* Command type; the (8 << 16) field value matches the original
	 * driver — NOTE(review): meaning not evident from this file.
	 */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);

	/* Point the chip at the host TX BD ring (64-bit address, split). */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
}
3494
/* Reset the software TX ring state, terminate the BD page with a chain
 * BD pointing back to the ring start, and program the TX context.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 cid;

	/* Half the ring size; used as a wake threshold by the TX path —
	 * per its name; confirm against the TX completion handler.
	 */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The final BD slot is a chain BD whose address is the ring's
	 * own base, so the single-page ring wraps onto itself.
	 */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	/* Fresh ring: producer, consumers and byte sequence all zero. */
	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	cid = TX_CID;
	/* Cache the mailbox addresses used to publish the TX producer
	 * index and byte sequence to the chip.
	 */
	bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid);
}
3519
/* Initialize the RX BD ring pages, chain them together, program the RX
 * L2 context, and pre-fill the ring with receive buffers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	/* Fresh ring: producer, consumers and byte sequence all zero. */
	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	/* Fill every BD in every page with the buffer length/flags, then
	 * make each page's last BD chain to the next page; the last page
	 * chains back to page 0 so the ring wraps.
	 */
	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		/* rxbd now points at this page's final (chain) BD. */
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context: BD-chain type plus the DMA address of
	 * the first ring page.
	 */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Attach an skb to each BD; on allocation failure stop early and
	 * run with a partially filled ring.
	 */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3579
3580 static void
3581 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3582 {
3583         u32 num_rings, max;
3584
3585         bp->rx_ring_size = size;
3586         num_rings = 1;
3587         while (size > MAX_RX_DESC_CNT) {
3588                 size -= MAX_RX_DESC_CNT;
3589                 num_rings++;
3590         }
3591         /* round to next power of 2 */
3592         max = MAX_RX_RINGS;
3593         while ((max & num_rings) == 0)
3594                 max >>= 1;
3595
3596         if (num_rings != max)
3597                 max <<= 1;
3598
3599         bp->rx_max_ring = max;
3600         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3601 }
3602
/* Release every skb still attached to the TX ring, unmapping the head
 * buffer and all fragment DMA mappings before freeing.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	/* Each packet occupies one BD for the head plus one per page
	 * fragment, so i advances by (1 + nr_frags) per packet.
	 */
	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (head) part of the skb. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the BDs that follow. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		i += j + 1;
	}

}
3639
3640 static void
3641 bnx2_free_rx_skbs(struct bnx2 *bp)
3642 {
3643         int i;
3644
3645         if (bp->rx_buf_ring == NULL)
3646                 return;
3647
3648         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3649                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3650                 struct sk_buff *skb = rx_buf->skb;
3651
3652                 if (skb == NULL)
3653                         continue;
3654
3655                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3656                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3657
3658                 rx_buf->skb = NULL;
3659
3660                 dev_kfree_skb(skb);
3661         }
3662 }
3663
/* Free all skbs still held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3670
3671 static int
3672 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3673 {
3674         int rc;
3675
3676         rc = bnx2_reset_chip(bp, reset_code);
3677         bnx2_free_skbs(bp);
3678         if (rc)
3679                 return rc;
3680
3681         if ((rc = bnx2_init_chip(bp)) != 0)
3682                 return rc;
3683
3684         bnx2_init_tx_ring(bp);
3685         bnx2_init_rx_ring(bp);
3686         return 0;
3687 }
3688
3689 static int
3690 bnx2_init_nic(struct bnx2 *bp)
3691 {
3692         int rc;
3693
3694         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3695                 return rc;
3696
3697         spin_lock_bh(&bp->phy_lock);
3698         bnx2_init_phy(bp);
3699         spin_unlock_bh(&bp->phy_lock);
3700         bnx2_set_link(bp);
3701         return 0;
3702 }
3703
/* Offline register self-test.  For each table entry, write all-zeros
 * and then all-ones to the register and verify that exactly the
 * read/write bits (rw_mask) respond while the read-only bits (ro_mask)
 * keep their saved value.  The original register value is restored in
 * every case.  Returns 0 on success or -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16   offset;	/* register offset within BAR0 */
		u16   flags;	/* unused by this table */
		u32   rw_mask;	/* bits expected to be writable */
		u32   ro_mask;	/* bits expected to be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff offset terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: r/w bits must read back 0, r/o bits
		 * must keep their saved value.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: r/w bits must read back 1, r/o bits
		 * must still be untouched.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3866
3867 static int
3868 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3869 {
3870         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3871                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3872         int i;
3873
3874         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3875                 u32 offset;
3876
3877                 for (offset = 0; offset < size; offset += 4) {
3878
3879                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3880
3881                         if (REG_RD_IND(bp, start + offset) !=
3882                                 test_pattern[i]) {
3883                                 return -ENODEV;
3884                         }
3885                 }
3886         }
3887         return 0;
3888 }
3889
3890 static int
3891 bnx2_test_memory(struct bnx2 *bp)
3892 {
3893         int ret = 0;
3894         int i;
3895         static const struct {
3896                 u32   offset;
3897                 u32   len;
3898         } mem_tbl[] = {
3899                 { 0x60000,  0x4000 },
3900                 { 0xa0000,  0x3000 },
3901                 { 0xe0000,  0x4000 },
3902                 { 0x120000, 0x4000 },
3903                 { 0x1a0000, 0x4000 },
3904                 { 0x160000, 0x4000 },
3905                 { 0xffffffff, 0    },
3906         };
3907
3908         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3909                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3910                         mem_tbl[i].len)) != 0) {
3911                         return ret;
3912                 }
3913         }
3914
3915         return ret;
3916 }
3917
3918 #define BNX2_MAC_LOOPBACK       0
3919 #define BNX2_PHY_LOOPBACK       1
3920
/* Send one max-size frame through the selected loopback path (MAC or
 * PHY) and verify it is received back intact.  Returns 0 on success,
 * -EINVAL for an unknown mode, -ENOMEM on skb allocation failure, or
 * -ENODEV on any ring/status/data mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our MAC as destination, zeroed source
	 * and type fields, then a counting byte pattern as payload.
	 */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a coalescing pass (no interrupt) so the status block is
	 * current before we snapshot the RX consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Describe the whole frame with a single start+end TX BD. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell: publish producer index and byte seq. */
	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	/* Give the chip time to loop the frame back. */
	udelay(100);

	/* Coalesce again so the status block reflects the result. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX consumer must have caught up with our producer. */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* Exactly num_pkts frames must have arrived on the RX ring. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip sits at the head of the
	 * receive buffer, before the frame data at rx_offset.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject the frame if the chip flagged any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* The length must match (excluding the 4-byte CRC)... */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* ...and the payload pattern must round-trip unchanged. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4039
4040 #define BNX2_MAC_LOOPBACK_FAILED        1
4041 #define BNX2_PHY_LOOPBACK_FAILED        2
4042 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4043                                          BNX2_PHY_LOOPBACK_FAILED)
4044
4045 static int
4046 bnx2_test_loopback(struct bnx2 *bp)
4047 {
4048         int rc = 0;
4049
4050         if (!netif_running(bp->dev))
4051                 return BNX2_LOOPBACK_FAILED;
4052
4053         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4054         spin_lock_bh(&bp->phy_lock);
4055         bnx2_init_phy(bp);
4056         spin_unlock_bh(&bp->phy_lock);
4057         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4058                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4059         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4060                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4061         return rc;
4062 }
4063
4064 #define NVRAM_SIZE 0x200
4065 #define CRC32_RESIDUAL 0xdebb20e3
4066
4067 static int
4068 bnx2_test_nvram(struct bnx2 *bp)
4069 {
4070         u32 buf[NVRAM_SIZE / 4];
4071         u8 *data = (u8 *) buf;
4072         int rc = 0;
4073         u32 magic, csum;
4074
4075         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4076                 goto test_nvram_done;
4077
4078         magic = be32_to_cpu(buf[0]);
4079         if (magic != 0x669955aa) {
4080                 rc = -ENODEV;
4081                 goto test_nvram_done;
4082         }
4083
4084         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4085                 goto test_nvram_done;
4086
4087         csum = ether_crc_le(0x100, data);
4088         if (csum != CRC32_RESIDUAL) {
4089                 rc = -ENODEV;
4090                 goto test_nvram_done;
4091         }
4092
4093         csum = ether_crc_le(0x100, data + 0x100);
4094         if (csum != CRC32_RESIDUAL) {
4095                 rc = -ENODEV;
4096         }
4097
4098 test_nvram_done:
4099         return rc;
4100 }
4101
4102 static int
4103 bnx2_test_link(struct bnx2 *bp)
4104 {
4105         u32 bmsr;
4106
4107         spin_lock_bh(&bp->phy_lock);
4108         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4109         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4110         spin_unlock_bh(&bp->phy_lock);
4111
4112         if (bmsr & BMSR_LSTATUS) {
4113                 return 0;
4114         }
4115         return -ENODEV;
4116 }
4117
4118 static int
4119 bnx2_test_intr(struct bnx2 *bp)
4120 {
4121         int i;
4122         u16 status_idx;
4123
4124         if (!netif_running(bp->dev))
4125                 return -ENODEV;
4126
4127         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4128
4129         /* This register is not touched during run-time. */
4130         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4131         REG_RD(bp, BNX2_HC_COMMAND);
4132
4133         for (i = 0; i < 10; i++) {
4134                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4135                         status_idx) {
4136
4137                         break;
4138                 }
4139
4140                 msleep_interruptible(10);
4141         }
4142         if (i < 10)
4143                 return 0;
4144
4145         return -ENODEV;
4146 }
4147
/* Periodic SerDes link maintenance for the 5706.  If autonegotiation
 * has not brought the link up but a signal is present with no incoming
 * config words, force 1 Gb/s full duplex (parallel detect); once
 * config words reappear on a parallel-detected link, re-enable
 * autonegotiation.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg cycle. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = bp->timer_interval;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Probe vendor-specific registers (0x1c shadow,
			 * 0x15 via 0x17 select) for signal/config state.
			 * 0x15 is read twice — presumably latched; the
			 * second read gives the current value (confirm
			 * against PHY documentation).
			 */
			bnx2_write_phy(bp, 0x1c, 0x7c00);
			bnx2_read_phy(bp, 0x1c, &phy1);

			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);

			if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
				!(phy2 & 0x20)) {	/* no CONFIG */

				/* Partner is not autonegotiating: force
				 * the link at 1 Gb/s full duplex.
				 */
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, MII_BMCR, bmcr);
				bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
		u32 phy2;

		/* Link is up via parallel detect: if config words start
		 * arriving again, return to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);

			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		}
	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4202
/* Periodic SerDes link maintenance for the 5708.  While the link is
 * down with autoneg configured, alternate between forcing 2.5 Gb/s
 * full duplex and re-enabling autonegotiation.  Only applies when the
 * PHY is 2.5 Gb/s capable.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Still waiting out a previously started autoneg cycle. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg hasn't linked: try forcing 2.5 Gb/s. */
			bmcr &= ~BMCR_ANENABLE;
			bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->current_interval = SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode didn't link either: back to autoneg
			 * and skip the next two timer ticks while it runs.
			 */
			bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, MII_BMCR, bmcr);
			bp->serdes_an_pending = 2;
			bp->current_interval = bp->timer_interval;
		}

	} else
		bp->current_interval = bp->timer_interval;

	spin_unlock(&bp->phy_lock);
}
4237
4238 static void
4239 bnx2_timer(unsigned long data)
4240 {
4241         struct bnx2 *bp = (struct bnx2 *) data;
4242         u32 msg;
4243
4244         if (!netif_running(bp->dev))
4245                 return;
4246
4247         if (atomic_read(&bp->intr_sem) != 0)
4248                 goto bnx2_restart_timer;
4249
4250         msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4251         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4252
4253         bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4254
4255         if (bp->phy_flags & PHY_SERDES_FLAG) {
4256                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4257                         bnx2_5706_serdes_timer(bp);
4258                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4259                         bnx2_5708_serdes_timer(bp);
4260         }
4261
4262 bnx2_restart_timer:
4263         mod_timer(&bp->timer, jiffies + bp->current_interval);
4264 }
4265
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	/* Power the chip up and mask interrupts before touching anything. */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* Try MSI except on 5706 A0/A1 silicon or when the user disabled
	 * it via the module parameter; fall back to shared INTx otherwise.
	 */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	/* Unwind in reverse order on init failure. */
	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			/* Tear down MSI and re-init the NIC on INTx. */
			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	/* Flag still set here means the MSI test passed (or was skipped). */
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4361
4362 static void
4363 bnx2_reset_task(void *data)
4364 {
4365         struct bnx2 *bp = data;
4366
4367         if (!netif_running(bp->dev))
4368                 return;
4369
4370         bp->in_reset_task = 1;
4371         bnx2_netif_stop(bp);
4372
4373         bnx2_init_nic(bp);
4374
4375         atomic_set(&bp->intr_sem, 1);
4376         bnx2_netif_start(bp);
4377         bp->in_reset_task = 0;
4378 }
4379
4380 static void
4381 bnx2_tx_timeout(struct net_device *dev)
4382 {
4383         struct bnx2 *bp = netdev_priv(dev);
4384
4385         /* This allows the netif to be shutdown gracefully before resetting */
4386         schedule_work(&bp->reset_task);
4387 }
4388
4389 #ifdef BCM_VLAN
4390 /* Called with rtnl_lock */
4391 static void
4392 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4393 {
4394         struct bnx2 *bp = netdev_priv(dev);
4395
4396         bnx2_netif_stop(bp);
4397
4398         bp->vlgrp = vlgrp;
4399         bnx2_set_rx_mode(dev);
4400
4401         bnx2_netif_start(bp);
4402 }
4403
4404 /* Called with rtnl_lock */
4405 static void
4406 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4407 {
4408         struct bnx2 *bp = netdev_priv(dev);
4409
4410         bnx2_netif_stop(bp);
4411
4412         if (bp->vlgrp)
4413                 bp->vlgrp->vlan_devices[vid] = NULL;
4414         bnx2_set_rx_mode(dev);
4415
4416         bnx2_netif_start(bp);
4417 }
4418 #endif
4419
4420 /* Called with netif_tx_lock.
4421  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4422  * netif_wake_queue().
4423  */
4424 static int
4425 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4426 {
4427         struct bnx2 *bp = netdev_priv(dev);
4428         dma_addr_t mapping;
4429         struct tx_bd *txbd;
4430         struct sw_bd *tx_buf;
4431         u32 len, vlan_tag_flags, last_frag, mss;
4432         u16 prod, ring_prod;
4433         int i;
4434
4435         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4436                 netif_stop_queue(dev);
4437                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4438                         dev->name);
4439
4440                 return NETDEV_TX_BUSY;
4441         }
4442         len = skb_headlen(skb);
4443         prod = bp->tx_prod;
4444         ring_prod = TX_RING_IDX(prod);
4445
4446         vlan_tag_flags = 0;
4447         if (skb->ip_summed == CHECKSUM_PARTIAL) {
4448                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4449         }
4450
4451         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4452                 vlan_tag_flags |=
4453                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4454         }
4455 #ifdef BCM_TSO
4456         if ((mss = skb_shinfo(skb)->gso_size) &&
4457                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4458                 u32 tcp_opt_len, ip_tcp_len;
4459
4460                 if (skb_header_cloned(skb) &&
4461                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4462                         dev_kfree_skb(skb);
4463                         return NETDEV_TX_OK;
4464                 }
4465
4466                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4467                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4468
4469                 tcp_opt_len = 0;
4470                 if (skb->h.th->doff > 5) {
4471                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4472                 }
4473                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4474
4475                 skb->nh.iph->check = 0;
4476                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4477                 skb->h.th->check =
4478                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4479                                             skb->nh.iph->daddr,
4480                                             0, IPPROTO_TCP, 0);
4481
4482                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4483                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4484                                 (tcp_opt_len >> 2)) << 8;
4485                 }
4486         }
4487         else
4488 #endif
4489         {
4490                 mss = 0;
4491         }
4492
4493         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4494
4495         tx_buf = &bp->tx_buf_ring[ring_prod];
4496         tx_buf->skb = skb;
4497         pci_unmap_addr_set(tx_buf, mapping, mapping);
4498
4499         txbd = &bp->tx_desc_ring[ring_prod];
4500
4501         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4502         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4503         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4504         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4505
4506         last_frag = skb_shinfo(skb)->nr_frags;
4507
4508         for (i = 0; i < last_frag; i++) {
4509                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4510
4511                 prod = NEXT_TX_BD(prod);
4512                 ring_prod = TX_RING_IDX(prod);
4513                 txbd = &bp->tx_desc_ring[ring_prod];
4514
4515                 len = frag->size;
4516                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4517                         len, PCI_DMA_TODEVICE);
4518                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4519                                 mapping, mapping);
4520
4521                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4522                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4523                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4524                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4525
4526         }
4527         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4528
4529         prod = NEXT_TX_BD(prod);
4530         bp->tx_prod_bseq += skb->len;
4531
4532         REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4533         REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4534
4535         mmiowb();
4536
4537         bp->tx_prod = prod;
4538         dev->trans_start = jiffies;
4539
4540         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4541                 netif_stop_queue(dev);
4542                 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4543                         netif_wake_queue(dev);
4544         }
4545
4546         return NETDEV_TX_OK;
4547 }
4548
4549 /* Called with rtnl_lock */
4550 static int
4551 bnx2_close(struct net_device *dev)
4552 {
4553         struct bnx2 *bp = netdev_priv(dev);
4554         u32 reset_code;
4555
4556         /* Calling flush_scheduled_work() may deadlock because
4557          * linkwatch_event() may be on the workqueue and it will try to get
4558          * the rtnl_lock which we are holding.
4559          */
4560         while (bp->in_reset_task)
4561                 msleep(1);
4562
4563         bnx2_netif_stop(bp);
4564         del_timer_sync(&bp->timer);
4565         if (bp->flags & NO_WOL_FLAG)
4566                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4567         else if (bp->wol)
4568                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4569         else
4570                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4571         bnx2_reset_chip(bp, reset_code);
4572         free_irq(bp->pdev->irq, dev);
4573         if (bp->flags & USING_MSI_FLAG) {
4574                 pci_disable_msi(bp->pdev);
4575                 bp->flags &= ~USING_MSI_FLAG;
4576         }
4577         bnx2_free_skbs(bp);
4578         bnx2_free_mem(bp);
4579         bp->link_up = 0;
4580         netif_carrier_off(bp->dev);
4581         bnx2_set_power_state(bp, PCI_D3hot);
4582         return 0;
4583 }
4584
/* Read a 64-bit hardware counter split into _hi/_lo halves.  The whole
 * expansion is now parenthesized so the macro composes safely inside any
 * surrounding expression (the old form was a bare sum and would bind
 * wrongly under, e.g., multiplication).
 */
#define GET_NET_STATS64(ctr)                                    \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low half fits in unsigned long. */
#define GET_NET_STATS32(ctr)            \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4597
/* Translate the chip's hardware statistics block into the generic
 * net_device_stats counters reported to the stack.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats memory may not be allocated yet; return stale/zero counts. */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	/* Aggregate unicast + multicast + broadcast for packet totals. */
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	/* NOTE(review): multicast is filled from the *Out* counter here --
	 * looks like it should be the In counter; confirm against hardware
	 * stats documentation.
	 */
	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the sum of the individual rx error classes above. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 or 5708 A0. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include firmware-level drops alongside buffer-exhaustion drops. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4673
4674 /* All ethtool functions called with rtnl_lock */
4675
4676 static int
4677 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4678 {
4679         struct bnx2 *bp = netdev_priv(dev);
4680
4681         cmd->supported = SUPPORTED_Autoneg;
4682         if (bp->phy_flags & PHY_SERDES_FLAG) {
4683                 cmd->supported |= SUPPORTED_1000baseT_Full |
4684                         SUPPORTED_FIBRE;
4685
4686                 cmd->port = PORT_FIBRE;
4687         }
4688         else {
4689                 cmd->supported |= SUPPORTED_10baseT_Half |
4690                         SUPPORTED_10baseT_Full |
4691                         SUPPORTED_100baseT_Half |
4692                         SUPPORTED_100baseT_Full |
4693                         SUPPORTED_1000baseT_Full |
4694                         SUPPORTED_TP;
4695
4696                 cmd->port = PORT_TP;
4697         }
4698
4699         cmd->advertising = bp->advertising;
4700
4701         if (bp->autoneg & AUTONEG_SPEED) {
4702                 cmd->autoneg = AUTONEG_ENABLE;
4703         }
4704         else {
4705                 cmd->autoneg = AUTONEG_DISABLE;
4706         }
4707
4708         if (netif_carrier_ok(dev)) {
4709                 cmd->speed = bp->line_speed;
4710                 cmd->duplex = bp->duplex;
4711         }
4712         else {
4713                 cmd->speed = -1;
4714                 cmd->duplex = -1;
4715         }
4716
4717         cmd->transceiver = XCVR_INTERNAL;
4718         cmd->phy_address = bp->phy_addr;
4719
4720         return 0;
4721 }
4722
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated after validation. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertisements are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000 half duplex is never supported. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything this PHY
			 * type supports.
			 */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* Serdes accepts only 1000 or 2500 full duplex,
			 * 2500 only on 2.5G-capable parts.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Forced 1000 Mbps is rejected on copper. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Commit the validated settings and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4798
4799 static void
4800 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4801 {
4802         struct bnx2 *bp = netdev_priv(dev);
4803
4804         strcpy(info->driver, DRV_MODULE_NAME);
4805         strcpy(info->version, DRV_MODULE_VERSION);
4806         strcpy(info->bus_info, pci_name(bp->pdev));
4807         info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4808         info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4809         info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4810         info->fw_version[1] = info->fw_version[3] = '.';
4811         info->fw_version[5] = 0;
4812 }
4813
/* Size in bytes of the register dump returned by bnx2_get_regs()
 * (ethtool -d).
 */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4821
4822 static void
4823 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4824 {
4825         u32 *p = _p, i, offset;
4826         u8 *orig_p = _p;
4827         struct bnx2 *bp = netdev_priv(dev);
4828         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4829                                  0x0800, 0x0880, 0x0c00, 0x0c10,
4830                                  0x0c30, 0x0d08, 0x1000, 0x101c,
4831                                  0x1040, 0x1048, 0x1080, 0x10a4,
4832                                  0x1400, 0x1490, 0x1498, 0x14f0,
4833                                  0x1500, 0x155c, 0x1580, 0x15dc,
4834                                  0x1600, 0x1658, 0x1680, 0x16d8,
4835                                  0x1800, 0x1820, 0x1840, 0x1854,
4836                                  0x1880, 0x1894, 0x1900, 0x1984,
4837                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4838                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
4839                                  0x2000, 0x2030, 0x23c0, 0x2400,
4840                                  0x2800, 0x2820, 0x2830, 0x2850,
4841                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
4842                                  0x3c00, 0x3c94, 0x4000, 0x4010,
4843                                  0x4080, 0x4090, 0x43c0, 0x4458,
4844                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
4845                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
4846                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
4847                                  0x5fc0, 0x6000, 0x6400, 0x6428,
4848                                  0x6800, 0x6848, 0x684c, 0x6860,
4849                                  0x6888, 0x6910, 0x8000 };
4850
4851         regs->version = 0;
4852
4853         memset(p, 0, BNX2_REGDUMP_LEN);
4854
4855         if (!netif_running(bp->dev))
4856                 return;
4857
4858         i = 0;
4859         offset = reg_boundaries[0];
4860         p += offset;
4861         while (offset < BNX2_REGDUMP_LEN) {
4862                 *p++ = REG_RD(bp, offset);
4863                 offset += 4;
4864                 if (offset == reg_boundaries[i + 1]) {
4865                         offset = reg_boundaries[i + 2];
4866                         p = (u32 *) (orig_p + offset);
4867                         i += 2;
4868                 }
4869         }
4870 }
4871
4872 static void
4873 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4874 {
4875         struct bnx2 *bp = netdev_priv(dev);
4876
4877         if (bp->flags & NO_WOL_FLAG) {
4878                 wol->supported = 0;
4879                 wol->wolopts = 0;
4880         }
4881         else {
4882                 wol->supported = WAKE_MAGIC;
4883                 if (bp->wol)
4884                         wol->wolopts = WAKE_MAGIC;
4885                 else
4886                         wol->wolopts = 0;
4887         }
4888         memset(&wol->sopass, 0, sizeof(wol->sopass));
4889 }
4890
4891 static int
4892 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4893 {
4894         struct bnx2 *bp = netdev_priv(dev);
4895
4896         if (wol->wolopts & ~WAKE_MAGIC)
4897                 return -EINVAL;
4898
4899         if (wol->wolopts & WAKE_MAGIC) {
4900                 if (bp->flags & NO_WOL_FLAG)
4901                         return -EINVAL;
4902
4903                 bp->wol = 1;
4904         }
4905         else {
4906                 bp->wol = 0;
4907         }
4908         return 0;
4909 }
4910
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* Restarting autoneg only makes sense when autoneg is enabled. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Must drop the spinlock to sleep. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg-timeout handling driven from
		 * the periodic timer.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4945
4946 static int
4947 bnx2_get_eeprom_len(struct net_device *dev)
4948 {
4949         struct bnx2 *bp = netdev_priv(dev);
4950
4951         if (bp->flash_info == NULL)
4952                 return 0;
4953
4954         return (int) bp->flash_size;
4955 }
4956
4957 static int
4958 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4959                 u8 *eebuf)
4960 {
4961         struct bnx2 *bp = netdev_priv(dev);
4962         int rc;
4963
4964         /* parameters already validated in ethtool_get_eeprom */
4965
4966         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4967
4968         return rc;
4969 }
4970
4971 static int
4972 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4973                 u8 *eebuf)
4974 {
4975         struct bnx2 *bp = netdev_priv(dev);
4976         int rc;
4977
4978         /* parameters already validated in ethtool_set_eeprom */
4979
4980         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4981
4982         return rc;
4983 }
4984
4985 static int
4986 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4987 {
4988         struct bnx2 *bp = netdev_priv(dev);
4989
4990         memset(coal, 0, sizeof(struct ethtool_coalesce));
4991
4992         coal->rx_coalesce_usecs = bp->rx_ticks;
4993         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4994         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4995         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4996
4997         coal->tx_coalesce_usecs = bp->tx_ticks;
4998         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4999         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5000         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5001
5002         coal->stats_block_coalesce_usecs = bp->stats_ticks;
5003
5004         return 0;
5005 }
5006
5007 static int
5008 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5009 {
5010         struct bnx2 *bp = netdev_priv(dev);
5011
5012         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5013         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5014
5015         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5016         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5017
5018         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5019         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5020
5021         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5022         if (bp->rx_quick_cons_trip_int > 0xff)
5023                 bp->rx_quick_cons_trip_int = 0xff;
5024
5025         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5026         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5027
5028         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5029         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5030
5031         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5032         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5033
5034         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5035         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5036                 0xff;
5037
5038         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5039         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5040         bp->stats_ticks &= 0xffff00;
5041
5042         if (netif_running(bp->dev)) {
5043                 bnx2_netif_stop(bp);
5044                 bnx2_init_nic(bp);
5045                 bnx2_netif_start(bp);
5046         }
5047
5048         return 0;
5049 }
5050
5051 static void
5052 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5053 {
5054         struct bnx2 *bp = netdev_priv(dev);
5055
5056         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5057         ering->rx_mini_max_pending = 0;
5058         ering->rx_jumbo_max_pending = 0;
5059
5060         ering->rx_pending = bp->rx_ring_size;
5061         ering->rx_mini_pending = 0;
5062         ering->rx_jumbo_pending = 0;
5063
5064         ering->tx_max_pending = MAX_TX_DESC_CNT;
5065         ering->tx_pending = bp->tx_ring_size;
5066 }
5067
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Tx ring must keep room for a maximally-fragmented skb. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	/* Quiesce and tear down the rings before resizing. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	/* Rebuild and restart with the new sizes. */
	if (netif_running(bp->dev)) {
		int rc;

		/* NOTE(review): on allocation failure we return with the
		 * netif stopped and the rings freed while the device is
		 * still marked running -- confirm callers can recover.
		 */
		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5101
5102 static void
5103 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5104 {
5105         struct bnx2 *bp = netdev_priv(dev);
5106
5107         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5108         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5109         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5110 }
5111
5112 static int
5113 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5114 {
5115         struct bnx2 *bp = netdev_priv(dev);
5116
5117         bp->req_flow_ctrl = 0;
5118         if (epause->rx_pause)
5119                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5120         if (epause->tx_pause)
5121                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5122
5123         if (epause->autoneg) {
5124                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5125         }
5126         else {
5127                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5128         }
5129
5130         spin_lock_bh(&bp->phy_lock);
5131
5132         bnx2_setup_phy(bp);
5133
5134         spin_unlock_bh(&bp->phy_lock);
5135
5136         return 0;
5137 }
5138
5139 static u32
5140 bnx2_get_rx_csum(struct net_device *dev)
5141 {
5142         struct bnx2 *bp = netdev_priv(dev);
5143
5144         return bp->rx_csum;
5145 }
5146
5147 static int
5148 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5149 {
5150         struct bnx2 *bp = netdev_priv(dev);
5151
5152         bp->rx_csum = data;
5153         return 0;
5154 }
5155
5156 static int
5157 bnx2_set_tso(struct net_device *dev, u32 data)
5158 {
5159         if (data)
5160                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5161         else
5162                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5163         return 0;
5164 }
5165
#define BNX2_NUM_STATS 46

/* ethtool statistics names, returned for ETH_SS_STATS by
 * bnx2_get_strings().  Entry order must stay in lock-step with
 * bnx2_stats_offset_arr[] and the per-chip stats_len arrays below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5218
/* Convert a statistics_block field offset to a 32-bit word index. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offset of each counter within the hardware statistics
 * block, in the same order as bnx2_stats_str_arr[].  For 64-bit
 * counters this points at the _hi word; the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5269
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width in bytes (8, 4, or 0 = skip) for 5706 A0-A2 and
 * 5708 A0 chips; indexed in the same order as bnx2_stats_str_arr[].
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Counter widths for later chips; only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5288
#define BNX2_NUM_TESTS 6

/* ethtool self-test names, returned for ETH_SS_TEST by
 * bnx2_get_strings().  Order matches the buf[] indices filled in
 * bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5301
/* ethtool self_test_count: number of results bnx2_self_test() reports. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5307
/* ethtool self-test handler.  Offline tests (register, memory,
 * loopback) take the NIC down and reset the chip into diagnostic mode;
 * online tests (NVRAM, interrupt, link) run without disrupting traffic.
 * A nonzero buf[i] marks test i as failed, matching the order of
 * bnx2_tests_str_arr[].
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the interface and put the chip in DIAG mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore: full re-init if the interface is up, otherwise
		 * just leave the chip reset.
		 */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5363
5364 static void
5365 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5366 {
5367         switch (stringset) {
5368         case ETH_SS_STATS:
5369                 memcpy(buf, bnx2_stats_str_arr,
5370                         sizeof(bnx2_stats_str_arr));
5371                 break;
5372         case ETH_SS_TEST:
5373                 memcpy(buf, bnx2_tests_str_arr,
5374                         sizeof(bnx2_tests_str_arr));
5375                 break;
5376         }
5377 }
5378
/* ethtool get_stats_count: number of counters bnx2_get_ethtool_stats()
 * reports.
 */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5384
5385 static void
5386 bnx2_get_ethtool_stats(struct net_device *dev,
5387                 struct ethtool_stats *stats, u64 *buf)
5388 {
5389         struct bnx2 *bp = netdev_priv(dev);
5390         int i;
5391         u32 *hw_stats = (u32 *) bp->stats_blk;
5392         u8 *stats_len_arr = NULL;
5393
5394         if (hw_stats == NULL) {
5395                 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5396                 return;
5397         }
5398
5399         if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5400             (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5401             (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5402             (CHIP_ID(bp) == CHIP_ID_5708_A0))
5403                 stats_len_arr = bnx2_5706_stats_len_arr;
5404         else
5405                 stats_len_arr = bnx2_5708_stats_len_arr;
5406
5407         for (i = 0; i < BNX2_NUM_STATS; i++) {
5408                 if (stats_len_arr[i] == 0) {
5409                         /* skip this counter */
5410                         buf[i] = 0;
5411                         continue;
5412                 }
5413                 if (stats_len_arr[i] == 4) {
5414                         /* 4-byte counter */
5415                         buf[i] = (u64)
5416                                 *(hw_stats + bnx2_stats_offset_arr[i]);
5417                         continue;
5418                 }
5419                 /* 8-byte counter */
5420                 buf[i] = (((u64) *(hw_stats +
5421                                         bnx2_stats_offset_arr[i])) << 32) +
5422                                 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
5423         }
5424 }
5425
5426 static int
5427 bnx2_phys_id(struct net_device *dev, u32 data)
5428 {
5429         struct bnx2 *bp = netdev_priv(dev);
5430         int i;
5431         u32 save;
5432
5433         if (data == 0)
5434                 data = 2;
5435
5436         save = REG_RD(bp, BNX2_MISC_CFG);
5437         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5438
5439         for (i = 0; i < (data * 2); i++) {
5440                 if ((i % 2) == 0) {
5441                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5442                 }
5443                 else {
5444                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5445                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5446                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5447                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5448                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5449                                 BNX2_EMAC_LED_TRAFFIC);
5450                 }
5451                 msleep_interruptible(500);
5452                 if (signal_pending(current))
5453                         break;
5454         }
5455         REG_WR(bp, BNX2_EMAC_LED, 0);
5456         REG_WR(bp, BNX2_MISC_CFG, save);
5457         return 0;
5458 }
5459
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5497
/* MII ioctl handler (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).
 * Called with rtnl_lock.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* PHY register reads are allowed for any user. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writes can reconfigure the PHY; require admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5539
5540 /* Called with rtnl_lock */
5541 static int
5542 bnx2_change_mac_addr(struct net_device *dev, void *p)
5543 {
5544         struct sockaddr *addr = p;
5545         struct bnx2 *bp = netdev_priv(dev);
5546
5547         if (!is_valid_ether_addr(addr->sa_data))
5548                 return -EINVAL;
5549
5550         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5551         if (netif_running(dev))
5552                 bnx2_set_mac_addr(bp);
5553
5554         return 0;
5555 }
5556
5557 /* Called with rtnl_lock */
5558 static int
5559 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5560 {
5561         struct bnx2 *bp = netdev_priv(dev);
5562
5563         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5564                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5565                 return -EINVAL;
5566
5567         dev->mtu = new_mtu;
5568         if (netif_running(dev)) {
5569                 bnx2_netif_stop(bp);
5570
5571                 bnx2_init_nic(bp);
5572
5573                 bnx2_netif_start(bp);
5574         }
5575         return 0;
5576 }
5577
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ line masked so
 * packets can be processed while normal interrupts are unavailable
 * (e.g. netconsole).
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5589
5590 static int __devinit
5591 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5592 {
5593         struct bnx2 *bp;
5594         unsigned long mem_len;
5595         int rc;
5596         u32 reg;
5597
5598         SET_MODULE_OWNER(dev);
5599         SET_NETDEV_DEV(dev, &pdev->dev);
5600         bp = netdev_priv(dev);
5601
5602         bp->flags = 0;
5603         bp->phy_flags = 0;
5604
5605         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5606         rc = pci_enable_device(pdev);
5607         if (rc) {
5608                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5609                 goto err_out;
5610         }
5611
5612         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5613                 dev_err(&pdev->dev,
5614                         "Cannot find PCI device base address, aborting.\n");
5615                 rc = -ENODEV;
5616                 goto err_out_disable;
5617         }
5618
5619         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5620         if (rc) {
5621                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5622                 goto err_out_disable;
5623         }
5624
5625         pci_set_master(pdev);
5626
5627         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5628         if (bp->pm_cap == 0) {
5629                 dev_err(&pdev->dev,
5630                         "Cannot find power management capability, aborting.\n");
5631                 rc = -EIO;
5632                 goto err_out_release;
5633         }
5634
5635         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5636                 bp->flags |= USING_DAC_FLAG;
5637                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5638                         dev_err(&pdev->dev,
5639                                 "pci_set_consistent_dma_mask failed, aborting.\n");
5640                         rc = -EIO;
5641                         goto err_out_release;
5642                 }
5643         }
5644         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5645                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5646                 rc = -EIO;
5647                 goto err_out_release;
5648         }
5649
5650         bp->dev = dev;
5651         bp->pdev = pdev;
5652
5653         spin_lock_init(&bp->phy_lock);
5654         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5655
5656         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5657         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5658         dev->mem_end = dev->mem_start + mem_len;
5659         dev->irq = pdev->irq;
5660
5661         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5662
5663         if (!bp->regview) {
5664                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5665                 rc = -ENOMEM;
5666                 goto err_out_release;
5667         }
5668
5669         /* Configure byte swap and enable write to the reg_window registers.
5670          * Rely on CPU to do target byte swapping on big endian systems
5671          * The chip's target access swapping will not swap all accesses
5672          */
5673         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5674                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5675                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5676
5677         bnx2_set_power_state(bp, PCI_D0);
5678
5679         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5680
5681         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5682                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5683                 if (bp->pcix_cap == 0) {
5684                         dev_err(&pdev->dev,
5685                                 "Cannot find PCIX capability, aborting.\n");
5686                         rc = -EIO;
5687                         goto err_out_unmap;
5688                 }
5689         }
5690
5691         /* Get bus information. */
5692         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5693         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5694                 u32 clkreg;
5695
5696                 bp->flags |= PCIX_FLAG;
5697
5698                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5699
5700                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5701                 switch (clkreg) {
5702                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5703                         bp->bus_speed_mhz = 133;
5704                         break;
5705
5706                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5707                         bp->bus_speed_mhz = 100;
5708                         break;
5709
5710                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5711                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5712                         bp->bus_speed_mhz = 66;
5713                         break;
5714
5715                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5716                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5717                         bp->bus_speed_mhz = 50;
5718                         break;
5719
5720                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5721                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5722                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5723                         bp->bus_speed_mhz = 33;
5724                         break;
5725                 }
5726         }
5727         else {
5728                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5729                         bp->bus_speed_mhz = 66;
5730                 else
5731                         bp->bus_speed_mhz = 33;
5732         }
5733
5734         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5735                 bp->flags |= PCI_32BIT_FLAG;
5736
5737         /* 5706A0 may falsely detect SERR and PERR. */
5738         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5739                 reg = REG_RD(bp, PCI_COMMAND);
5740                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5741                 REG_WR(bp, PCI_COMMAND, reg);
5742         }
5743         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5744                 !(bp->flags & PCIX_FLAG)) {
5745
5746                 dev_err(&pdev->dev,
5747                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
5748                 goto err_out_unmap;
5749         }
5750
5751         bnx2_init_nvram(bp);
5752
5753         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5754
5755         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5756             BNX2_SHM_HDR_SIGNATURE_SIG)
5757                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5758         else
5759                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5760
5761         /* Get the permanent MAC address.  First we need to make sure the
5762          * firmware is actually running.
5763          */
5764         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5765
5766         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5767             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5768                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5769                 rc = -ENODEV;
5770                 goto err_out_unmap;
5771         }
5772
5773         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5774
5775         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5776         bp->mac_addr[0] = (u8) (reg >> 8);
5777         bp->mac_addr[1] = (u8) reg;
5778
5779         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5780         bp->mac_addr[2] = (u8) (reg >> 24);
5781         bp->mac_addr[3] = (u8) (reg >> 16);
5782         bp->mac_addr[4] = (u8) (reg >> 8);
5783         bp->mac_addr[5] = (u8) reg;
5784
5785         bp->tx_ring_size = MAX_TX_DESC_CNT;
5786         bnx2_set_rx_ring_size(bp, 255);
5787
5788         bp->rx_csum = 1;
5789
5790         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5791
5792         bp->tx_quick_cons_trip_int = 20;
5793         bp->tx_quick_cons_trip = 20;
5794         bp->tx_ticks_int = 80;
5795         bp->tx_ticks = 80;
5796
5797         bp->rx_quick_cons_trip_int = 6;
5798         bp->rx_quick_cons_trip = 6;
5799         bp->rx_ticks_int = 18;
5800         bp->rx_ticks = 18;
5801
5802         bp->stats_ticks = 1000000 & 0xffff00;
5803
5804         bp->timer_interval =  HZ;
5805         bp->current_interval =  HZ;
5806
5807         bp->phy_addr = 1;
5808
5809         /* Disable WOL support if we are running on a SERDES chip. */
5810         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5811                 bp->phy_flags |= PHY_SERDES_FLAG;
5812                 bp->flags |= NO_WOL_FLAG;
5813                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5814                         bp->phy_addr = 2;
5815                         reg = REG_RD_IND(bp, bp->shmem_base +
5816                                          BNX2_SHARED_HW_CFG_CONFIG);
5817                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5818                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5819                 }
5820         }
5821
5822         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5823             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5824             (CHIP_ID(bp) == CHIP_ID_5708_B1))
5825                 bp->flags |= NO_WOL_FLAG;
5826
5827         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5828                 bp->tx_quick_cons_trip_int =
5829                         bp->tx_quick_cons_trip;
5830                 bp->tx_ticks_int = bp->tx_ticks;
5831                 bp->rx_quick_cons_trip_int =
5832                         bp->rx_quick_cons_trip;
5833                 bp->rx_ticks_int = bp->rx_ticks;
5834                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5835                 bp->com_ticks_int = bp->com_ticks;
5836                 bp->cmd_ticks_int = bp->cmd_ticks;
5837         }
5838
5839         /* Disable MSI on 5706 if AMD 8132 bridge is found.
5840          *
5841          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
5842          * with byte enables disabled on the unused 32-bit word.  This is legal
5843          * but causes problems on the AMD 8132 which will eventually stop
5844          * responding after a while.
5845          *
5846          * AMD believes this incompatibility is unique to the 5706, and
5847          * prefers to locally disable MSI rather than globally disabling it
5848          * using pci_msi_quirk.
5849          */
5850         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5851                 struct pci_dev *amd_8132 = NULL;
5852
5853                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5854                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
5855                                                   amd_8132))) {
5856                         u8 rev;
5857
5858                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5859                         if (rev >= 0x10 && rev <= 0x13) {
5860                                 disable_msi = 1;
5861                                 pci_dev_put(amd_8132);
5862                                 break;
5863                         }
5864                 }
5865         }
5866
5867         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5868         bp->req_line_speed = 0;
5869         if (bp->phy_flags & PHY_SERDES_FLAG) {
5870                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5871
5872                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5873                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5874                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5875                         bp->autoneg = 0;
5876                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5877                         bp->req_duplex = DUPLEX_FULL;
5878                 }
5879         }
5880         else {
5881                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5882         }
5883
5884         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5885
5886         init_timer(&bp->timer);
5887         bp->timer.expires = RUN_AT(bp->timer_interval);
5888         bp->timer.data = (unsigned long) bp;
5889         bp->timer.function = bnx2_timer;
5890
5891         return 0;
5892
5893 err_out_unmap:
5894         if (bp->regview) {
5895                 iounmap(bp->regview);
5896                 bp->regview = NULL;
5897         }
5898
5899 err_out_release:
5900         pci_release_regions(pdev);
5901
5902 err_out_disable:
5903         pci_disable_device(pdev);
5904         pci_set_drvdata(pdev, NULL);
5905
5906 err_out:
5907         return rc;
5908 }
5909
5910 static int __devinit
5911 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5912 {
5913         static int version_printed = 0;
5914         struct net_device *dev = NULL;
5915         struct bnx2 *bp;
5916         int rc, i;
5917
5918         if (version_printed++ == 0)
5919                 printk(KERN_INFO "%s", version);
5920
5921         /* dev zeroed in init_etherdev */
5922         dev = alloc_etherdev(sizeof(*bp));
5923
5924         if (!dev)
5925                 return -ENOMEM;
5926
5927         rc = bnx2_init_board(pdev, dev);
5928         if (rc < 0) {
5929                 free_netdev(dev);
5930                 return rc;
5931         }
5932
5933         dev->open = bnx2_open;
5934         dev->hard_start_xmit = bnx2_start_xmit;
5935         dev->stop = bnx2_close;
5936         dev->get_stats = bnx2_get_stats;
5937         dev->set_multicast_list = bnx2_set_rx_mode;
5938         dev->do_ioctl = bnx2_ioctl;
5939         dev->set_mac_address = bnx2_change_mac_addr;
5940         dev->change_mtu = bnx2_change_mtu;
5941         dev->tx_timeout = bnx2_tx_timeout;
5942         dev->watchdog_timeo = TX_TIMEOUT;
5943 #ifdef BCM_VLAN
5944         dev->vlan_rx_register = bnx2_vlan_rx_register;
5945         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5946 #endif
5947         dev->poll = bnx2_poll;
5948         dev->ethtool_ops = &bnx2_ethtool_ops;
5949         dev->weight = 64;
5950
5951         bp = netdev_priv(dev);
5952
5953 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5954         dev->poll_controller = poll_bnx2;
5955 #endif
5956
5957         if ((rc = register_netdev(dev))) {
5958                 dev_err(&pdev->dev, "Cannot register net device\n");
5959                 if (bp->regview)
5960                         iounmap(bp->regview);
5961                 pci_release_regions(pdev);
5962                 pci_disable_device(pdev);
5963                 pci_set_drvdata(pdev, NULL);
5964                 free_netdev(dev);
5965                 return rc;
5966         }
5967
5968         pci_set_drvdata(pdev, dev);
5969
5970         memcpy(dev->dev_addr, bp->mac_addr, 6);
5971         memcpy(dev->perm_addr, bp->mac_addr, 6);
5972         bp->name = board_info[ent->driver_data].name,
5973         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5974                 "IRQ %d, ",
5975                 dev->name,
5976                 bp->name,
5977                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5978                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5979                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5980                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5981                 bp->bus_speed_mhz,
5982                 dev->base_addr,
5983                 bp->pdev->irq);
5984
5985         printk("node addr ");
5986         for (i = 0; i < 6; i++)
5987                 printk("%2.2x", dev->dev_addr[i]);
5988         printk("\n");
5989
5990         dev->features |= NETIF_F_SG;
5991         if (bp->flags & USING_DAC_FLAG)
5992                 dev->features |= NETIF_F_HIGHDMA;
5993         dev->features |= NETIF_F_IP_CSUM;
5994 #ifdef BCM_VLAN
5995         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5996 #endif
5997 #ifdef BCM_TSO
5998         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5999 #endif
6000
6001         netif_carrier_off(bp->dev);
6002
6003         return 0;
6004 }
6005
/* PCI removal hook: tear the device down in roughly the reverse order of
 * probe.  The ordering below is load-bearing: pending work is flushed
 * before unregistering, BAR mapping is released before the netdev (which
 * owns bp) is freed, and PCI resources go last.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Let any work item queued on the shared workqueue finish before we
	 * start freeing the structures it may touch. */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* Unmap the register BAR before free_netdev() destroys bp, which
	 * holds the regview pointer. */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6024
/* PCI suspend hook: quiesce the NIC, reset the chip with a code describing
 * why we are going down, and enter the requested low-power state.
 * Always returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Interface is down: nothing to quiesce. */
	if (!netif_running(dev))
		return 0;

	/* Drain any pending work on the shared workqueue first. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the unload/suspend reason code (DRV_MSG naming suggests it is
	 * reported to on-chip firmware by bnx2_reset_chip — TODO confirm):
	 * link-down when WoL is unsupported, else WoL vs. no-WoL suspend. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	/* RX/TX rings were stopped above; release all queued skbs before
	 * dropping to the PM-core-chosen power state. */
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6050
/* PCI resume hook: mirror of bnx2_suspend().  Restore full power,
 * re-attach the netdev, and fully re-initialize the NIC (suspend reset
 * the chip, so a complete re-init is required).  Always returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Interface was down at suspend time: nothing to restore. */
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6066
/* PCI driver descriptor: binds the IDs in bnx2_pci_tbl to the probe,
 * remove, and power-management callbacks defined above. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6075
6076 static int __init bnx2_init(void)
6077 {
6078         return pci_register_driver(&bnx2_pci_driver);
6079 }
6080
6081 static void __exit bnx2_cleanup(void)
6082 {
6083         pci_unregister_driver(&bnx2_pci_driver);
6084 }
6085
/* Register the module entry/exit points with the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6088
6089
6090