]> Pileus Git - ~andy/linux/blob - drivers/net/bnx2.c
[BNX2]: Add 5709 reset and runtime code.
[~andy/linux] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
40 #define BCM_VLAN 1
41 #endif
42 #ifdef NETIF_F_TSO
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #define BCM_TSO 1
47 #endif
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
53
54 #include "bnx2.h"
55 #include "bnx2_fw.h"
56
57 #define DRV_MODULE_NAME         "bnx2"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "1.4.45"
60 #define DRV_MODULE_RELDATE      "September 29, 2006"
61
62 #define RUN_AT(x) (jiffies + (x))
63
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT  (5*HZ)
66
67 static const char version[] __devinitdata =
68         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
74
75 static int disable_msi = 0;
76
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board type, used as the driver_data index into board_info[] and the
 * PCI device table below.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,		/* HP OEM 5706 copper */
	NC370I,		/* HP OEM 5706 copper */
	BCM5706S,	/* SerDes variant of the 5706 */
	NC370F,		/* HP OEM 5706 SerDes */
	BCM5708,
	BCM5708S,	/* SerDes variant of the 5708 */
} board_t;
89
/* Human-readable adapter names, indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	};
102
/* PCI IDs handled by this driver.  HP OEM boards are matched first by
 * subsystem vendor/device ID; the PCI_ANY_ID entries then catch the
 * generic parts.  The last field is the board_t index into board_info[].
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
120
/* Supported NVRAM devices.  Each entry carries a strapping value that is
 * matched against the flash interface straps at init time, followed by
 * configuration register values and geometry (page bits/size, byte
 * address mask, total size) used to program NVRAM access.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
207
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
/* Return the number of free tx descriptors.
 *
 * tx_prod and tx_cons are masked to ring positions before subtracting,
 * so the unsigned difference can wrap when the producer has wrapped the
 * ring ahead of the consumer; the mask-and-decrement below folds such a
 * wrapped difference back into the valid range.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp)
{
	u32 diff;

	/* NOTE(review): barrier before reading prod/cons — presumably
	 * pairs with a barrier on the tx completion path; confirm against
	 * bnx2_tx_int() (not visible in this chunk).
	 */
	smp_mb();
	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
	if (diff > MAX_TX_DESC_CNT)
		diff = (diff & MAX_TX_DESC_CNT) - 1;
	return (bp->tx_ring_size - diff);
}
220
/* Indirect register read: select the target offset through the PCI
 * config window address register, then read the data back through the
 * window.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
}
227
/* Indirect register write: select the target offset through the PCI
 * config window address register, then write the value through the
 * window.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
234
235 static void
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
237 {
238         offset += cid_addr;
239         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
240                 int i;
241
242                 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
243                 REG_WR(bp, BNX2_CTX_CTX_CTRL,
244                        offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
245                 for (i = 0; i < 5; i++) {
246                         u32 val;
247                         val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
248                         if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
249                                 break;
250                         udelay(5);
251                 }
252         } else {
253                 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
254                 REG_WR(bp, BNX2_CTX_DATA, val);
255         }
256 }
257
258 static int
259 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
260 {
261         u32 val1;
262         int i, ret;
263
264         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
265                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
266                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
267
268                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
269                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
270
271                 udelay(40);
272         }
273
274         val1 = (bp->phy_addr << 21) | (reg << 16) |
275                 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
276                 BNX2_EMAC_MDIO_COMM_START_BUSY;
277         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
278
279         for (i = 0; i < 50; i++) {
280                 udelay(10);
281
282                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
283                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
284                         udelay(5);
285
286                         val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
287                         val1 &= BNX2_EMAC_MDIO_COMM_DATA;
288
289                         break;
290                 }
291         }
292
293         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
294                 *val = 0x0;
295                 ret = -EBUSY;
296         }
297         else {
298                 *val = val1;
299                 ret = 0;
300         }
301
302         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
303                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
304                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
305
306                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
307                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308
309                 udelay(40);
310         }
311
312         return ret;
313 }
314
315 static int
316 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
317 {
318         u32 val1;
319         int i, ret;
320
321         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
322                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
323                 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
324
325                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
326                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
327
328                 udelay(40);
329         }
330
331         val1 = (bp->phy_addr << 21) | (reg << 16) | val |
332                 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
333                 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
334         REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
335
336         for (i = 0; i < 50; i++) {
337                 udelay(10);
338
339                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
340                 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
341                         udelay(5);
342                         break;
343                 }
344         }
345
346         if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
347                 ret = -EBUSY;
348         else
349                 ret = 0;
350
351         if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
352                 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353                 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
354
355                 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
356                 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
357
358                 udelay(40);
359         }
360
361         return ret;
362 }
363
/* Mask chip interrupts.  The read back flushes the posted write so the
 * mask is in effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
371
/* Unmask chip interrupts.  The first write acks events up to
 * last_status_idx while the mask bit is still set; the second write
 * clears the mask.  The final COAL_NOW command asks the host coalescing
 * block to run immediately so any already-pending work raises an
 * interrupt right away.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
384
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR and bnx2_netif_start() see
 * interrupts as logically disabled; synchronize_irq() then drains any
 * handler already running on another CPU.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
392
393 static void
394 bnx2_netif_stop(struct bnx2 *bp)
395 {
396         bnx2_disable_int_sync(bp);
397         if (netif_running(bp->dev)) {
398                 netif_poll_disable(bp->dev);
399                 netif_tx_disable(bp->dev);
400                 bp->dev->trans_start = jiffies; /* prevent tx timeout */
401         }
402 }
403
404 static void
405 bnx2_netif_start(struct bnx2 *bp)
406 {
407         if (atomic_dec_and_test(&bp->intr_sem)) {
408                 if (netif_running(bp->dev)) {
409                         netif_wake_queue(bp->dev);
410                         netif_poll_enable(bp->dev);
411                         bnx2_enable_int(bp);
412                 }
413         }
414 }
415
/* Release all DMA memory: 5709 context pages, the combined
 * status/statistics block, tx/rx descriptor rings, and the tx/rx
 * software bookkeeping arrays.  Safe on a partially allocated device:
 * coherent buffers are checked before freeing, kfree/vfree accept NULL,
 * and every pointer is cleared afterwards so the function is
 * re-entrant-safe with respect to double calls.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;

	/* 5709-only on-chip context backing pages (ctx_pages is 0 on
	 * other chips, so the loop is a no-op there).
	 */
	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and statistics blocks share one allocation; freeing the
	 * status block frees both, so stats_blk is just cleared.
	 */
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
		bp->stats_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	for (i = 0; i < bp->rx_max_ring; i++) {
		if (bp->rx_desc_ring[i])
			pci_free_consistent(bp->pdev,
					    sizeof(struct rx_bd) * RX_DESC_CNT,
					    bp->rx_desc_ring[i],
					    bp->rx_desc_mapping[i]);
		bp->rx_desc_ring[i] = NULL;
	}
	vfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
454
/* Allocate all host memory the device needs: tx/rx software rings,
 * tx/rx descriptor rings (coherent DMA), a combined status+statistics
 * block, and on the 5709 the context backing pages.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything allocated
 * so far is released through bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
						sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* The rx software ring scales with rx_max_ring and can be large,
	 * hence vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
				  bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
				   bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev,
					     sizeof(struct rx_bd) * RX_DESC_CNT,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Statistics live cache-line-aligned right after the status block,
	 * both in the CPU mapping and the DMA mapping.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 8KB (0x2000) of context memory in BCM_PAGE_SIZE chunks;
		 * guarantee at least one page if BCM_PAGE_SIZE > 8KB.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
525
/* Publish the current link state (up/down, speed, duplex, autoneg
 * result) to the bootcode through the shared memory LINK_STATUS word.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR latches link-down events; read it twice so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
581
/* Log the link state to the console, update the carrier flag, and
 * forward the state to the bootcode.  The printk calls are pieces of
 * one logical line, e.g.:
 *   "ethN NIC Link is Up, 1000 Mbps full duplex, receive & transmit
 *    flow control ON"
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}

	/* Keep firmware's view of the link in sync. */
	bnx2_report_fw_link(bp);
}
616
/* Resolve bp->flow_ctrl (rx/tx pause) from the negotiated or forced
 * pause settings.  Pause is only meaningful in full duplex.  When both
 * speed and flow control were autonegotiated, the local and remote
 * advertisements are resolved per IEEE 802.3 Table 28B-3; the 5708
 * SerDes reports the already-resolved result directly in a PHY status
 * register.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If either speed or flow control was not autonegotiated, use the
	 * administratively requested setting (full duplex only).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: read the resolved pause result from the PHY. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	/* SerDes uses the 1000Base-X pause bit layout; translate it to
	 * the copper ADVERTISE_PAUSE_* encoding so one resolution table
	 * below serves both media types.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
692
693 static int
694 bnx2_5708s_linkup(struct bnx2 *bp)
695 {
696         u32 val;
697
698         bp->link_up = 1;
699         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
700         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
701                 case BCM5708S_1000X_STAT1_SPEED_10:
702                         bp->line_speed = SPEED_10;
703                         break;
704                 case BCM5708S_1000X_STAT1_SPEED_100:
705                         bp->line_speed = SPEED_100;
706                         break;
707                 case BCM5708S_1000X_STAT1_SPEED_1G:
708                         bp->line_speed = SPEED_1000;
709                         break;
710                 case BCM5708S_1000X_STAT1_SPEED_2G5:
711                         bp->line_speed = SPEED_2500;
712                         break;
713         }
714         if (val & BCM5708S_1000X_STAT1_FD)
715                 bp->duplex = DUPLEX_FULL;
716         else
717                 bp->duplex = DUPLEX_HALF;
718
719         return 0;
720 }
721
722 static int
723 bnx2_5706s_linkup(struct bnx2 *bp)
724 {
725         u32 bmcr, local_adv, remote_adv, common;
726
727         bp->link_up = 1;
728         bp->line_speed = SPEED_1000;
729
730         bnx2_read_phy(bp, MII_BMCR, &bmcr);
731         if (bmcr & BMCR_FULLDPLX) {
732                 bp->duplex = DUPLEX_FULL;
733         }
734         else {
735                 bp->duplex = DUPLEX_HALF;
736         }
737
738         if (!(bmcr & BMCR_ANENABLE)) {
739                 return 0;
740         }
741
742         bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
743         bnx2_read_phy(bp, MII_LPA, &remote_adv);
744
745         common = local_adv & remote_adv;
746         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
747
748                 if (common & ADVERTISE_1000XFULL) {
749                         bp->duplex = DUPLEX_FULL;
750                 }
751                 else {
752                         bp->duplex = DUPLEX_HALF;
753                 }
754         }
755
756         return 0;
757 }
758
/* Determine line speed and duplex for a copper PHY that just linked up.
 * With autoneg enabled, pick the highest common ability, checking
 * 1000Mbps first, then 100/10; without autoneg, decode the forced
 * settings from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link partner's 1000Mbps ability bits in MII_STAT1000
		 * sit two bit positions above our advertisement bits in
		 * MII_CTRL1000; shift before masking so they line up.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No common 1000Mbps ability; fall back to the
			 * 10/100 advertisement registers.
			 */
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Autoneg disabled: forced speed/duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
824
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Program the EMAC to match the currently resolved link
	 * parameters (speed, duplex, rx/tx flow control), then
	 * acknowledge the link-change attention.  Always returns 0.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		/* 1000 Mbps half duplex uses a different TX_LENGTHS
		 * value (0x26ff instead of 0x2620).
		 */
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* Only chips other than the 5706 have a
				 * dedicated 10M MII port mode; the 5706
				 * falls through to plain MII.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				/* 2.5G additionally sets the 25G mode bit
				 * and falls through to GMII.
				 */
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* No link: default the port mode to GMII. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
891
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* Re-evaluate the link state from the PHY (or from the EMAC
	 * status register on 5706 SerDes), update bp->link_up and the
	 * resolved speed/duplex/flow-control, report any change, and
	 * reprogram the MAC to match.  Always returns 0.
	 */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		/* Loopback modes force the link up unconditionally. */
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read it twice to get the
	 * current state.
	 */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* On 5706 SerDes, take the link state from the EMAC
		 * status register instead of the BMSR bit.
		 */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex via the chip-specific handler. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {

			u32 bmcr;

			/* Link down with autoneg requested: clear any
			 * forced 2.5G bit and re-enable autoneg if the
			 * PHY had it turned off.
			 */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}
		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	/* Only log a state change, but always reprogram the MAC. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
958
959 static int
960 bnx2_reset_phy(struct bnx2 *bp)
961 {
962         int i;
963         u32 reg;
964
965         bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
966
967 #define PHY_RESET_MAX_WAIT 100
968         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
969                 udelay(10);
970
971                 bnx2_read_phy(bp, MII_BMCR, &reg);
972                 if (!(reg & BMCR_RESET)) {
973                         udelay(20);
974                         break;
975                 }
976         }
977         if (i == PHY_RESET_MAX_WAIT) {
978                 return -EBUSY;
979         }
980         return 0;
981 }
982
983 static u32
984 bnx2_phy_get_pause_adv(struct bnx2 *bp)
985 {
986         u32 adv = 0;
987
988         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
989                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
990
991                 if (bp->phy_flags & PHY_SERDES_FLAG) {
992                         adv = ADVERTISE_1000XPAUSE;
993                 }
994                 else {
995                         adv = ADVERTISE_PAUSE_CAP;
996                 }
997         }
998         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
999                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1000                         adv = ADVERTISE_1000XPSE_ASYM;
1001                 }
1002                 else {
1003                         adv = ADVERTISE_PAUSE_ASYM;
1004                 }
1005         }
1006         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1007                 if (bp->phy_flags & PHY_SERDES_FLAG) {
1008                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1009                 }
1010                 else {
1011                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1012                 }
1013         }
1014         return adv;
1015 }
1016
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr, up1;
	u32 new_adv = 0;

	/* Program the SerDes PHY according to bp->autoneg and the
	 * requested speed/duplex/flow-control settings.  Called under
	 * bp->phy_lock, which is briefly dropped around the msleep
	 * below.  Always returns 0.
	 */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_line_speed == SPEED_2500) {
			/* Forcing 2.5G requires the UP1 2.5G bit; if it
			 * was off, toggle it and bounce the link.
			 */
			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (!(up1 & BCM5708S_UP1_2G5)) {
				up1 |= BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			/* Not forcing 2.5G on a 5708: clear the UP1 bit
			 * (also requires a link bounce if it changed).
			 */
			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
			if (up1 & BCM5708S_UP1_2G5) {
				up1 &= ~BCM5708S_UP1_2G5;
				bnx2_write_phy(bp, BCM5708S_UP1, up1);
				force_link_down = 1;
			}
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, MII_ADVERTISE, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, MII_ADVERTISE, adv);
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	/* Autoneg path.  Advertise 2.5G when the PHY is capable. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, up1);
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	/* Only restart autoneg when the advertisement changed or
	 * autoneg was previously disabled.
	 */
	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	return 0;
}
1120
/* ethtool advertisement masks covering every speed the fibre and
 * copper interfaces support.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register masks for the 10/100 and 1000 Mbps
 * capability bits.
 */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1133
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	/* Program the copper PHY for autonegotiation or a forced
	 * speed/duplex depending on bp->autoneg.  Called under
	 * bp->phy_lock, which is briefly dropped around the msleep
	 * below.  Always returns 0.
	 */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Build the desired 10/100 and 1000 advertisement
		 * values from bp->advertising plus flow control.
		 */
		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg was previously disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low; read twice to get
		 * the current state.
		 */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, MII_BMSR, &bmsr);
			bnx2_read_phy(bp, MII_BMSR, &bmsr);
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
1227
1228 static int
1229 bnx2_setup_phy(struct bnx2 *bp)
1230 {
1231         if (bp->loopback == MAC_LOOPBACK)
1232                 return 0;
1233
1234         if (bp->phy_flags & PHY_SERDES_FLAG) {
1235                 return (bnx2_setup_serdes_phy(bp));
1236         }
1237         else {
1238                 return (bnx2_setup_copper_phy(bp));
1239         }
1240 }
1241
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	/* One-time initialization of the 5708 SerDes PHY: select IEEE
	 * signalling in the DIG3 block, enable fiber mode with
	 * auto-detect and parallel detect, advertise 2.5G when capable,
	 * and apply revision/board specific TX tuning.  Always returns 0.
	 */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* If the shared-memory port config supplies a TXCTL3 value,
	 * apply it — but only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1295
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	/* One-time initialization of the 5706 SerDes PHY.  Always
	 * returns 0.
	 */
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		/* Raw writes to PHY register 0x1c — presumably extends
		 * the max frame length for jumbo MTU; magic values come
		 * from Broadcom and are not documented here.
		 */
		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length
		 * settings written above.
		 */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1330
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	/* One-time initialization of the copper PHY: CRC workaround,
	 * extended packet length for jumbo MTU, and ethernet@wirespeed.
	 * Always returns 0.
	 */
	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* NOTE(review): the flag is set unconditionally just above, so
	 * this condition is always true — confirm whether the flag was
	 * meant to be set per chip revision elsewhere instead.
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		/* Undocumented shadow-register write sequence from
		 * Broadcom; presumably the CRC fix the flag refers to.
		 */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1373
1374
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	/* Reset the PHY, cache its ID, run the chip-specific init
	 * routine, and then apply the current link settings.  Returns
	 * the chip-specific init's status.
	 */
	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Enable link-change attentions from the EMAC. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the 32-bit PHY ID from the two MII ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
1407
1408 static int
1409 bnx2_set_mac_loopback(struct bnx2 *bp)
1410 {
1411         u32 mac_mode;
1412
1413         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1414         mac_mode &= ~BNX2_EMAC_MODE_PORT;
1415         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1416         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1417         bp->link_up = 1;
1418         return 0;
1419 }
1420
1421 static int bnx2_test_link(struct bnx2 *);
1422
1423 static int
1424 bnx2_set_phy_loopback(struct bnx2 *bp)
1425 {
1426         u32 mac_mode;
1427         int rc, i;
1428
1429         spin_lock_bh(&bp->phy_lock);
1430         rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1431                             BMCR_SPEED1000);
1432         spin_unlock_bh(&bp->phy_lock);
1433         if (rc)
1434                 return rc;
1435
1436         for (i = 0; i < 10; i++) {
1437                 if (bnx2_test_link(bp) == 0)
1438                         break;
1439                 msleep(100);
1440         }
1441
1442         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1443         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1444                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1445                       BNX2_EMAC_MODE_25G_MODE);
1446
1447         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1448         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1449         bp->link_up = 1;
1450         return 0;
1451 }
1452
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	/* Post a message to the bootcode through the driver mailbox
	 * and wait up to FW_ACK_TIME_OUT_MS for the firmware to echo
	 * the sequence number back in the firmware mailbox.
	 *
	 * Returns 0 on success (or for WAIT0 messages, which are not
	 * waited on), -EBUSY on ack timeout (logged unless @silent),
	 * -EIO if the firmware reports a non-OK status.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware acks by echoing back the sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are not waited on beyond the ack loop. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
1495
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the 5709 context memory and load the host page table
	 * with the DMA addresses of the pre-allocated context pages.
	 * Returns 0 on success, -EBUSY if a page-table write does not
	 * complete.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	/* Encode the host page size relative to a 256-byte base. */
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Write the low and high halves of the page's DMA
		 * address, then trigger the page-table write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll until the chip clears the write-request bit. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
1529
1530 static void
1531 bnx2_init_context(struct bnx2 *bp)
1532 {
1533         u32 vcid;
1534
1535         vcid = 96;
1536         while (vcid) {
1537                 u32 vcid_addr, pcid_addr, offset;
1538
1539                 vcid--;
1540
1541                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1542                         u32 new_vcid;
1543
1544                         vcid_addr = GET_PCID_ADDR(vcid);
1545                         if (vcid & 0x8) {
1546                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1547                         }
1548                         else {
1549                                 new_vcid = vcid;
1550                         }
1551                         pcid_addr = GET_PCID_ADDR(new_vcid);
1552                 }
1553                 else {
1554                         vcid_addr = GET_CID_ADDR(vcid);
1555                         pcid_addr = vcid_addr;
1556                 }
1557
1558                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1559                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1560
1561                 /* Zero out the context. */
1562                 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1563                         CTX_WR(bp, 0x00, offset, 0);
1564                 }
1565
1566                 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1567                 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1568         }
1569 }
1570
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Work around bad internal rx buffer memory: drain the entire
	 * firmware mbuf pool, remember the good mbufs, then free only
	 * those back — the bad ones stay permanently allocated and are
	 * never handed out again.
	 * Returns 0 on success, -ENOMEM if the scratch array cannot be
	 * allocated.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Pack the mbuf value into the free-command format —
		 * presumably a duplicated-address encoding; exact
		 * layout is defined by the RBUF block, not shown here.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
1621
1622 static void
1623 bnx2_set_mac_addr(struct bnx2 *bp)
1624 {
1625         u32 val;
1626         u8 *mac_addr = bp->dev->dev_addr;
1627
1628         val = (mac_addr[0] << 8) | mac_addr[1];
1629
1630         REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1631
1632         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1633                 (mac_addr[4] << 8) | mac_addr[5];
1634
1635         REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1636 }
1637
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	/* Allocate and DMA-map a receive skb for ring slot @index and
	 * point the corresponding rx descriptor at it.
	 * Returns 0 on success, -ENOMEM if skb allocation fails.
	 */
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	/* NOTE(review): the mapping result is not checked for DMA
	 * mapping errors — confirm this is acceptable on all supported
	 * platforms.
	 */
	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor halves. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Account for the new buffer in the producer byte sequence. */
	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
1668
1669 static void
1670 bnx2_phy_int(struct bnx2 *bp)
1671 {
1672         u32 new_link_state, old_link_state;
1673
1674         new_link_state = bp->status_blk->status_attn_bits &
1675                 STATUS_ATTN_BITS_LINK_STATE;
1676         old_link_state = bp->status_blk->status_attn_bits_ack &
1677                 STATUS_ATTN_BITS_LINK_STATE;
1678         if (new_link_state != old_link_state) {
1679                 if (new_link_state) {
1680                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1681                                 STATUS_ATTN_BITS_LINK_STATE);
1682                 }
1683                 else {
1684                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1685                                 STATUS_ATTN_BITS_LINK_STATE);
1686                 }
1687                 bnx2_set_link(bp);
1688         }
1689 }
1690
1691 static void
1692 bnx2_tx_int(struct bnx2 *bp)
1693 {
1694         struct status_block *sblk = bp->status_blk;
1695         u16 hw_cons, sw_cons, sw_ring_cons;
1696         int tx_free_bd = 0;
1697
1698         hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1699         if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1700                 hw_cons++;
1701         }
1702         sw_cons = bp->tx_cons;
1703
1704         while (sw_cons != hw_cons) {
1705                 struct sw_bd *tx_buf;
1706                 struct sk_buff *skb;
1707                 int i, last;
1708
1709                 sw_ring_cons = TX_RING_IDX(sw_cons);
1710
1711                 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1712                 skb = tx_buf->skb;
1713 #ifdef BCM_TSO
1714                 /* partial BD completions possible with TSO packets */
1715                 if (skb_is_gso(skb)) {
1716                         u16 last_idx, last_ring_idx;
1717
1718                         last_idx = sw_cons +
1719                                 skb_shinfo(skb)->nr_frags + 1;
1720                         last_ring_idx = sw_ring_cons +
1721                                 skb_shinfo(skb)->nr_frags + 1;
1722                         if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1723                                 last_idx++;
1724                         }
1725                         if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1726                                 break;
1727                         }
1728                 }
1729 #endif
1730                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1731                         skb_headlen(skb), PCI_DMA_TODEVICE);
1732
1733                 tx_buf->skb = NULL;
1734                 last = skb_shinfo(skb)->nr_frags;
1735
1736                 for (i = 0; i < last; i++) {
1737                         sw_cons = NEXT_TX_BD(sw_cons);
1738
1739                         pci_unmap_page(bp->pdev,
1740                                 pci_unmap_addr(
1741                                         &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1742                                         mapping),
1743                                 skb_shinfo(skb)->frags[i].size,
1744                                 PCI_DMA_TODEVICE);
1745                 }
1746
1747                 sw_cons = NEXT_TX_BD(sw_cons);
1748
1749                 tx_free_bd += last + 1;
1750
1751                 dev_kfree_skb(skb);
1752
1753                 hw_cons = bp->hw_tx_cons =
1754                         sblk->status_tx_quick_consumer_index0;
1755
1756                 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1757                         hw_cons++;
1758                 }
1759         }
1760
1761         bp->tx_cons = sw_cons;
1762         /* Need to make the tx_cons update visible to bnx2_start_xmit()
1763          * before checking for netif_queue_stopped().  Without the
1764          * memory barrier, there is a small possibility that bnx2_start_xmit()
1765          * will miss it and cause the queue to be stopped forever.
1766          */
1767         smp_mb();
1768
1769         if (unlikely(netif_queue_stopped(bp->dev)) &&
1770                      (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1771                 netif_tx_lock(bp->dev);
1772                 if ((netif_queue_stopped(bp->dev)) &&
1773                     (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1774                         netif_wake_queue(bp->dev);
1775                 netif_tx_unlock(bp->dev);
1776         }
1777 }
1778
1779 static inline void
1780 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1781         u16 cons, u16 prod)
1782 {
1783         struct sw_bd *cons_rx_buf, *prod_rx_buf;
1784         struct rx_bd *cons_bd, *prod_bd;
1785
1786         cons_rx_buf = &bp->rx_buf_ring[cons];
1787         prod_rx_buf = &bp->rx_buf_ring[prod];
1788
1789         pci_dma_sync_single_for_device(bp->pdev,
1790                 pci_unmap_addr(cons_rx_buf, mapping),
1791                 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1792
1793         bp->rx_prod_bseq += bp->rx_buf_use_size;
1794
1795         prod_rx_buf->skb = skb;
1796
1797         if (cons == prod)
1798                 return;
1799
1800         pci_unmap_addr_set(prod_rx_buf, mapping,
1801                         pci_unmap_addr(cons_rx_buf, mapping));
1802
1803         cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1804         prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1805         prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1806         prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1807 }
1808
/* Process up to @budget received packets from the rx ring.
 *
 * Each completed buffer carries an l2_fhdr the chip prepends to the
 * packet data.  Error frames and frames we cannot re-buffer are
 * recycled via bnx2_reuse_rx_skb().  Returns the number of packets
 * delivered to the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
        struct status_block *sblk = bp->status_blk;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0;

        /* The chip skips the last entry of each ring page; step over it
         * when the hardware index lands there.
         */
        hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
                hw_cons++;
        }
        sw_cons = bp->rx_cons;
        sw_prod = bp->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len;
                u32 status;
                struct sw_bd *rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &bp->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;

                rx_buf->skb = NULL;

                dma_addr = pci_unmap_addr(rx_buf, mapping);

                /* Only the frame header area is synced here; the full
                 * buffer is unmapped below if the skb is passed up.
                 */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

                /* The chip places an l2_fhdr before the packet data;
                 * the reported length includes a trailing 4-byte CRC.
                 */
                rx_hdr = (struct l2_fhdr *) skb->data;
                len = rx_hdr->l2_fhdr_pkt_len - 4;

                if ((status = rx_hdr->l2_fhdr_status) &
                        (L2_FHDR_ERRORS_BAD_CRC |
                        L2_FHDR_ERRORS_PHY_DECODE |
                        L2_FHDR_ERRORS_ALIGNMENT |
                        L2_FHDR_ERRORS_TOO_SHORT |
                        L2_FHDR_ERRORS_GIANT_FRAME)) {

                        goto reuse_rx;
                }

                /* Since we don't have a jumbo ring, copy small packets
                 * if mtu > 1500
                 */
                if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 2);
                        if (new_skb == NULL)
                                goto reuse_rx;

                        /* aligned copy */
                        memcpy(new_skb->data,
                                skb->data + bp->rx_offset - 2,
                                len + 2);

                        skb_reserve(new_skb, 2);
                        skb_put(new_skb, len);

                        /* Original buffer stays in the ring; deliver the
                         * copy instead.
                         */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                }
                else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
                        /* Replacement buffer posted; pass the original
                         * skb up after unmapping it fully.
                         */
                        pci_unmap_single(bp->pdev, dma_addr,
                                bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

                        skb_reserve(skb, bp->rx_offset);
                        skb_put(skb, len);
                }
                else {
reuse_rx:
                        /* No replacement available (or bad frame): drop
                         * and recycle the buffer.
                         */
                        bnx2_reuse_rx_skb(bp, skb,
                                sw_ring_cons, sw_ring_prod);
                        goto next_rx;
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversize frames unless VLAN tagged (0x8100). */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                /* Trust the hardware checksum only when rx_csum is on,
                 * the frame is TCP/UDP, and no checksum error bits are
                 * set.
                 */
                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }

#ifdef BCM_VLAN
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
                        vlan_hwaccel_receive_skb(skb, bp->vlgrp,
                                rx_hdr->l2_fhdr_vlan_tag);
                }
                else
#endif
                        netif_receive_skb(skb);

                bp->dev->last_rx = jiffies;
                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bp->hw_rx_cons =
                                sblk->status_rx_quick_consumer_index0;
                        if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
                                hw_cons++;
                        rmb();
                }
        }
        bp->rx_cons = sw_cons;
        bp->rx_prod = sw_prod;

        /* Tell the chip about the new producer index and byte count. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
1958
1959 /* MSI ISR - The only difference between this and the INTx ISR
1960  * is that the MSI interrupt is always serviced.
1961  */
1962 static irqreturn_t
1963 bnx2_msi(int irq, void *dev_instance)
1964 {
1965         struct net_device *dev = dev_instance;
1966         struct bnx2 *bp = netdev_priv(dev);
1967
1968         prefetch(bp->status_blk);
1969         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1970                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1971                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1972
1973         /* Return here if interrupt is disabled. */
1974         if (unlikely(atomic_read(&bp->intr_sem) != 0))
1975                 return IRQ_HANDLED;
1976
1977         netif_rx_schedule(dev);
1978
1979         return IRQ_HANDLED;
1980 }
1981
1982 static irqreturn_t
1983 bnx2_interrupt(int irq, void *dev_instance)
1984 {
1985         struct net_device *dev = dev_instance;
1986         struct bnx2 *bp = netdev_priv(dev);
1987
1988         /* When using INTx, it is possible for the interrupt to arrive
1989          * at the CPU before the status block posted prior to the
1990          * interrupt. Reading a register will flush the status block.
1991          * When using MSI, the MSI message will always complete after
1992          * the status block write.
1993          */
1994         if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1995             (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1996              BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1997                 return IRQ_NONE;
1998
1999         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2000                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2001                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2002
2003         /* Return here if interrupt is shared and is disabled. */
2004         if (unlikely(atomic_read(&bp->intr_sem) != 0))
2005                 return IRQ_HANDLED;
2006
2007         netif_rx_schedule(dev);
2008
2009         return IRQ_HANDLED;
2010 }
2011
2012 static inline int
2013 bnx2_has_work(struct bnx2 *bp)
2014 {
2015         struct status_block *sblk = bp->status_blk;
2016
2017         if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2018             (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2019                 return 1;
2020
2021         if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
2022             bp->link_up)
2023                 return 1;
2024
2025         return 0;
2026 }
2027
/* NAPI poll routine.  Handles link attentions, reclaims tx completions,
 * processes rx packets up to the quota, and re-enables interrupts when
 * all work is done.  Returns 0 when complete, 1 to stay on the poll
 * list.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* A mismatch between the attention bits and their ack copy means
         * an unserviced link attention.
         */
        if ((bp->status_blk->status_attn_bits &
                STATUS_ATTN_BITS_LINK_STATE) !=
                (bp->status_blk->status_attn_bits_ack &
                STATUS_ATTN_BITS_LINK_STATE)) {

                spin_lock(&bp->phy_lock);
                bnx2_phy_int(bp);
                spin_unlock(&bp->phy_lock);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }

        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
                bnx2_tx_int(bp);

        if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
                int orig_budget = *budget;
                int work_done;

                /* Never exceed the per-device quota. */
                if (orig_budget > dev->quota)
                        orig_budget = dev->quota;

                work_done = bnx2_rx_int(bp, orig_budget);
                *budget -= work_done;
                dev->quota -= work_done;
        }

        /* Record the status index we have seen before re-checking for
         * work, so a new interrupt fires if the chip updates it again.
         */
        bp->last_status_idx = bp->status_blk->status_idx;
        rmb();

        if (!bnx2_has_work(bp)) {
                netif_rx_complete(dev);
                if (likely(bp->flags & USING_MSI_FLAG)) {
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bp->last_status_idx);
                        return 0;
                }
                /* NOTE(review): for INTx the ack is written twice, first
                 * with MASK_INT then without -- presumably a chip
                 * requirement when re-enabling legacy interrupts; confirm
                 * against the hardware manual before simplifying.
                 */
                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                       bp->last_status_idx);

                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                       bp->last_status_idx);
                return 0;
        }

        return 1;
}
2089
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
/* Program the receive filters (promiscuous, all-multi, or a 256-bit
 * multicast hash) and the RPM sort-mode registers from dev->flags and
 * the device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        int i;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits we manage cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Strip VLAN tags in hardware unless acceleration is active or
         * the management firmware (ASF) needs the tags preserved.
         */
        if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (!(bp->flags & ASF_ENABLE_FLAG))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every bit in the hash. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        /* Low 8 bits of the little-endian CRC select one
                         * of 256 hash bits: bits 7:5 pick the register,
                         * bits 4:0 the bit within it.
                         */
                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Only touch the EMAC mode register when it actually changes. */
        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, reprogram, then re-enable the sort-mode register. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
2164
2165 #define FW_BUF_SIZE     0x8000
2166
2167 static int
2168 bnx2_gunzip_init(struct bnx2 *bp)
2169 {
2170         if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2171                 goto gunzip_nomem1;
2172
2173         if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2174                 goto gunzip_nomem2;
2175
2176         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2177         if (bp->strm->workspace == NULL)
2178                 goto gunzip_nomem3;
2179
2180         return 0;
2181
2182 gunzip_nomem3:
2183         kfree(bp->strm);
2184         bp->strm = NULL;
2185
2186 gunzip_nomem2:
2187         vfree(bp->gunzip_buf);
2188         bp->gunzip_buf = NULL;
2189
2190 gunzip_nomem1:
2191         printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2192                             "uncompression.\n", bp->dev->name);
2193         return -ENOMEM;
2194 }
2195
2196 static void
2197 bnx2_gunzip_end(struct bnx2 *bp)
2198 {
2199         kfree(bp->strm->workspace);
2200
2201         kfree(bp->strm);
2202         bp->strm = NULL;
2203
2204         if (bp->gunzip_buf) {
2205                 vfree(bp->gunzip_buf);
2206                 bp->gunzip_buf = NULL;
2207         }
2208 }
2209
2210 static int
2211 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2212 {
2213         int n, rc;
2214
2215         /* check gzip header */
2216         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2217                 return -EINVAL;
2218
2219         n = 10;
2220
2221 #define FNAME   0x8
2222         if (zbuf[3] & FNAME)
2223                 while ((zbuf[n++] != 0) && (n < len));
2224
2225         bp->strm->next_in = zbuf + n;
2226         bp->strm->avail_in = len - n;
2227         bp->strm->next_out = bp->gunzip_buf;
2228         bp->strm->avail_out = FW_BUF_SIZE;
2229
2230         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2231         if (rc != Z_OK)
2232                 return rc;
2233
2234         rc = zlib_inflate(bp->strm, Z_FINISH);
2235
2236         *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2237         *outbuf = bp->gunzip_buf;
2238
2239         if ((rc != Z_OK) && (rc != Z_STREAM_END))
2240                 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2241                        bp->dev->name, bp->strm->msg);
2242
2243         zlib_inflateEnd(bp->strm);
2244
2245         if (rc == Z_STREAM_END)
2246                 return 0;
2247
2248         return rc;
2249 }
2250
/* Load an RV2P processor image.  Each instruction is a 64-bit pair
 * written through the INSTR_HIGH/INSTR_LOW registers and committed to
 * instruction slot i/8 via the per-processor ADDR_CMD register.  The
 * processor is left in reset; it is un-stalled later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
        u32 rv2p_proc)
{
        int i;
        u32 val;


        for (i = 0; i < rv2p_code_len; i += 8) {
                /* NOTE(review): the words are pre-swapped with
                 * cpu_to_le32 before REG_WR -- presumably REG_WR's own
                 * byte handling makes this correct for the firmware
                 * image format; confirm before changing.
                 */
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
                rv2p_code++;

                if (rv2p_proc == RV2P_PROC1) {
                        val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
                }
                else {
                        val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                        REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }
}
2283
2284 static int
2285 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2286 {
2287         u32 offset;
2288         u32 val;
2289         int rc;
2290
2291         /* Halt the CPU. */
2292         val = REG_RD_IND(bp, cpu_reg->mode);
2293         val |= cpu_reg->mode_value_halt;
2294         REG_WR_IND(bp, cpu_reg->mode, val);
2295         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2296
2297         /* Load the Text area. */
2298         offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2299         if (fw->gz_text) {
2300                 u32 text_len;
2301                 void *text;
2302
2303                 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2304                                  &text_len);
2305                 if (rc)
2306                         return rc;
2307
2308                 fw->text = text;
2309         }
2310         if (fw->gz_text) {
2311                 int j;
2312
2313                 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2314                         REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2315                 }
2316         }
2317
2318         /* Load the Data area. */
2319         offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2320         if (fw->data) {
2321                 int j;
2322
2323                 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2324                         REG_WR_IND(bp, offset, fw->data[j]);
2325                 }
2326         }
2327
2328         /* Load the SBSS area. */
2329         offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2330         if (fw->sbss) {
2331                 int j;
2332
2333                 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2334                         REG_WR_IND(bp, offset, fw->sbss[j]);
2335                 }
2336         }
2337
2338         /* Load the BSS area. */
2339         offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2340         if (fw->bss) {
2341                 int j;
2342
2343                 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2344                         REG_WR_IND(bp, offset, fw->bss[j]);
2345                 }
2346         }
2347
2348         /* Load the Read-Only area. */
2349         offset = cpu_reg->spad_base +
2350                 (fw->rodata_addr - cpu_reg->mips_view_base);
2351         if (fw->rodata) {
2352                 int j;
2353
2354                 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2355                         REG_WR_IND(bp, offset, fw->rodata[j]);
2356                 }
2357         }
2358
2359         /* Clear the pre-fetch instruction. */
2360         REG_WR_IND(bp, cpu_reg->inst, 0);
2361         REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2362
2363         /* Start the CPU. */
2364         val = REG_RD_IND(bp, cpu_reg->mode);
2365         val &= ~cpu_reg->mode_value_halt;
2366         REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2367         REG_WR_IND(bp, cpu_reg->mode, val);
2368
2369         return 0;
2370 }
2371
/* Load firmware into all on-chip processors: the two RV2P engines and
 * the RX, TX, TX patch-up, and completion MIPS CPUs.  Returns 0 on
 * success or the first error encountered; the gunzip resources are
 * released in either case.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        struct cpu_reg cpu_reg;
        struct fw_info *fw;
        int rc = 0;
        void *text;
        u32 text_len;

        if ((rc = bnx2_gunzip_init(bp)) != 0)
                return rc;

        /* Initialize the RV2P processor. */
        rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

        rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
                         &text_len);
        if (rc)
                goto init_cpu_err;

        load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

        /* Initialize the RX Processor. */
        /* cpu_reg is refilled before each load_cpu_fw() call with that
         * processor's register addresses; only the constants differ.
         */
        cpu_reg.mode = BNX2_RXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_RXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_RXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_rxp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        cpu_reg.mode = BNX2_TXP_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TXP_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TXP_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_txp_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        cpu_reg.mode = BNX2_TPAT_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_TPAT_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_tpat_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        cpu_reg.mode = BNX2_COM_CPU_MODE;
        cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
        cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
        cpu_reg.state = BNX2_COM_CPU_STATE;
        cpu_reg.state_value_clear = 0xffffff;
        cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
        cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
        cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
        cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
        cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
        cpu_reg.spad_base = BNX2_COM_SCRATCH;
        cpu_reg.mips_view_base = 0x8000000;

        fw = &bnx2_com_fw_06;

        rc = load_cpu_fw(bp, &cpu_reg, fw);
        if (rc)
                goto init_cpu_err;

        /* Fall through: gunzip resources are freed on success too. */
init_cpu_err:
        bnx2_gunzip_end(bp);
        return rc;
}
2483
2484 static int
2485 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2486 {
2487         u16 pmcsr;
2488
2489         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2490
2491         switch (state) {
2492         case PCI_D0: {
2493                 u32 val;
2494
2495                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2496                         (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2497                         PCI_PM_CTRL_PME_STATUS);
2498
2499                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2500                         /* delay required during transition out of D3hot */
2501                         msleep(20);
2502
2503                 val = REG_RD(bp, BNX2_EMAC_MODE);
2504                 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2505                 val &= ~BNX2_EMAC_MODE_MPKT;
2506                 REG_WR(bp, BNX2_EMAC_MODE, val);
2507
2508                 val = REG_RD(bp, BNX2_RPM_CONFIG);
2509                 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2510                 REG_WR(bp, BNX2_RPM_CONFIG, val);
2511                 break;
2512         }
2513         case PCI_D3hot: {
2514                 int i;
2515                 u32 val, wol_msg;
2516
2517                 if (bp->wol) {
2518                         u32 advertising;
2519                         u8 autoneg;
2520
2521                         autoneg = bp->autoneg;
2522                         advertising = bp->advertising;
2523
2524                         bp->autoneg = AUTONEG_SPEED;
2525                         bp->advertising = ADVERTISED_10baseT_Half |
2526                                 ADVERTISED_10baseT_Full |
2527                                 ADVERTISED_100baseT_Half |
2528                                 ADVERTISED_100baseT_Full |
2529                                 ADVERTISED_Autoneg;
2530
2531                         bnx2_setup_copper_phy(bp);
2532
2533                         bp->autoneg = autoneg;
2534                         bp->advertising = advertising;
2535
2536                         bnx2_set_mac_addr(bp);
2537
2538                         val = REG_RD(bp, BNX2_EMAC_MODE);
2539
2540                         /* Enable port mode. */
2541                         val &= ~BNX2_EMAC_MODE_PORT;
2542                         val |= BNX2_EMAC_MODE_PORT_MII |
2543                                BNX2_EMAC_MODE_MPKT_RCVD |
2544                                BNX2_EMAC_MODE_ACPI_RCVD |
2545                                BNX2_EMAC_MODE_MPKT;
2546
2547                         REG_WR(bp, BNX2_EMAC_MODE, val);
2548
2549                         /* receive all multicast */
2550                         for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2551                                 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2552                                        0xffffffff);
2553                         }
2554                         REG_WR(bp, BNX2_EMAC_RX_MODE,
2555                                BNX2_EMAC_RX_MODE_SORT_MODE);
2556
2557                         val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2558                               BNX2_RPM_SORT_USER0_MC_EN;
2559                         REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2560                         REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2561                         REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2562                                BNX2_RPM_SORT_USER0_ENA);
2563
2564                         /* Need to enable EMAC and RPM for WOL. */
2565                         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2566                                BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2567                                BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2568                                BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2569
2570                         val = REG_RD(bp, BNX2_RPM_CONFIG);
2571                         val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2572                         REG_WR(bp, BNX2_RPM_CONFIG, val);
2573
2574                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2575                 }
2576                 else {
2577                         wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2578                 }
2579
2580                 if (!(bp->flags & NO_WOL_FLAG))
2581                         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2582
2583                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2584                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2585                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2586
2587                         if (bp->wol)
2588                                 pmcsr |= 3;
2589                 }
2590                 else {
2591                         pmcsr |= 3;
2592                 }
2593                 if (bp->wol) {
2594                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2595                 }
2596                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2597                                       pmcsr);
2598
2599                 /* No more memory access after this point until
2600                  * device is brought back to D0.
2601                  */
2602                 udelay(50);
2603                 break;
2604         }
2605         default:
2606                 return -EINVAL;
2607         }
2608         return 0;
2609 }
2610
2611 static int
2612 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2613 {
2614         u32 val;
2615         int j;
2616
2617         /* Request access to the flash interface. */
2618         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2619         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2620                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2621                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2622                         break;
2623
2624                 udelay(5);
2625         }
2626
2627         if (j >= NVRAM_TIMEOUT_COUNT)
2628                 return -EBUSY;
2629
2630         return 0;
2631 }
2632
2633 static int
2634 bnx2_release_nvram_lock(struct bnx2 *bp)
2635 {
2636         int j;
2637         u32 val;
2638
2639         /* Relinquish nvram interface. */
2640         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2641
2642         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2643                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2644                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2645                         break;
2646
2647                 udelay(5);
2648         }
2649
2650         if (j >= NVRAM_TIMEOUT_COUNT)
2651                 return -EBUSY;
2652
2653         return 0;
2654 }
2655
2656
2657 static int
2658 bnx2_enable_nvram_write(struct bnx2 *bp)
2659 {
2660         u32 val;
2661
2662         val = REG_RD(bp, BNX2_MISC_CFG);
2663         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2664
2665         if (!bp->flash_info->buffered) {
2666                 int j;
2667
2668                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2669                 REG_WR(bp, BNX2_NVM_COMMAND,
2670                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2671
2672                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2673                         udelay(5);
2674
2675                         val = REG_RD(bp, BNX2_NVM_COMMAND);
2676                         if (val & BNX2_NVM_COMMAND_DONE)
2677                                 break;
2678                 }
2679
2680                 if (j >= NVRAM_TIMEOUT_COUNT)
2681                         return -EBUSY;
2682         }
2683         return 0;
2684 }
2685
2686 static void
2687 bnx2_disable_nvram_write(struct bnx2 *bp)
2688 {
2689         u32 val;
2690
2691         val = REG_RD(bp, BNX2_MISC_CFG);
2692         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2693 }
2694
2695
2696 static void
2697 bnx2_enable_nvram_access(struct bnx2 *bp)
2698 {
2699         u32 val;
2700
2701         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2702         /* Enable both bits, even on read. */
2703         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2704                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2705 }
2706
2707 static void
2708 bnx2_disable_nvram_access(struct bnx2 *bp)
2709 {
2710         u32 val;
2711
2712         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2713         /* Disable both bits, even after read. */
2714         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2715                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2716                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
2717 }
2718
2719 static int
2720 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2721 {
2722         u32 cmd;
2723         int j;
2724
2725         if (bp->flash_info->buffered)
2726                 /* Buffered flash, no erase needed */
2727                 return 0;
2728
2729         /* Build an erase command */
2730         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2731               BNX2_NVM_COMMAND_DOIT;
2732
2733         /* Need to clear DONE bit separately. */
2734         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2735
2736         /* Address of the NVRAM to read from. */
2737         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2738
2739         /* Issue an erase command. */
2740         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2741
2742         /* Wait for completion. */
2743         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2744                 u32 val;
2745
2746                 udelay(5);
2747
2748                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2749                 if (val & BNX2_NVM_COMMAND_DONE)
2750                         break;
2751         }
2752
2753         if (j >= NVRAM_TIMEOUT_COUNT)
2754                 return -EBUSY;
2755
2756         return 0;
2757 }
2758
2759 static int
2760 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2761 {
2762         u32 cmd;
2763         int j;
2764
2765         /* Build the command word. */
2766         cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2767
2768         /* Calculate an offset of a buffered flash. */
2769         if (bp->flash_info->buffered) {
2770                 offset = ((offset / bp->flash_info->page_size) <<
2771                            bp->flash_info->page_bits) +
2772                           (offset % bp->flash_info->page_size);
2773         }
2774
2775         /* Need to clear DONE bit separately. */
2776         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2777
2778         /* Address of the NVRAM to read from. */
2779         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2780
2781         /* Issue a read command. */
2782         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2783
2784         /* Wait for completion. */
2785         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2786                 u32 val;
2787
2788                 udelay(5);
2789
2790                 val = REG_RD(bp, BNX2_NVM_COMMAND);
2791                 if (val & BNX2_NVM_COMMAND_DONE) {
2792                         val = REG_RD(bp, BNX2_NVM_READ);
2793
2794                         val = be32_to_cpu(val);
2795                         memcpy(ret_val, &val, 4);
2796                         break;
2797                 }
2798         }
2799         if (j >= NVRAM_TIMEOUT_COUNT)
2800                 return -EBUSY;
2801
2802         return 0;
2803 }
2804
2805
2806 static int
2807 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2808 {
2809         u32 cmd, val32;
2810         int j;
2811
2812         /* Build the command word. */
2813         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2814
2815         /* Calculate an offset of a buffered flash. */
2816         if (bp->flash_info->buffered) {
2817                 offset = ((offset / bp->flash_info->page_size) <<
2818                           bp->flash_info->page_bits) +
2819                          (offset % bp->flash_info->page_size);
2820         }
2821
2822         /* Need to clear DONE bit separately. */
2823         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2824
2825         memcpy(&val32, val, 4);
2826         val32 = cpu_to_be32(val32);
2827
2828         /* Write the data. */
2829         REG_WR(bp, BNX2_NVM_WRITE, val32);
2830
2831         /* Address of the NVRAM to write to. */
2832         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2833
2834         /* Issue the write command. */
2835         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2836
2837         /* Wait for completion. */
2838         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2839                 udelay(5);
2840
2841                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2842                         break;
2843         }
2844         if (j >= NVRAM_TIMEOUT_COUNT)
2845                 return -EBUSY;
2846
2847         return 0;
2848 }
2849
/* Identify the flash/EEPROM part attached to the chip by matching the
 * NVM_CFG1 strapping against flash_table, record the match in
 * bp->flash_info, and determine the flash size.  If the interface has
 * not been reconfigured yet (bit 30 of NVM_CFG1 clear), also program
 * the NVM config registers from the matching table entry.
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc;
        struct flash_spec *flash;

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

        rc = 0;
        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strap field to compare against. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* j == entry_count means whichever loop ran above found no match. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
                return -ENODEV;
        }

        /* Prefer the size reported in shared memory; fall back to the
         * table entry's total size when the firmware reports none. */
        val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
2927
2928 static int
2929 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2930                 int buf_size)
2931 {
2932         int rc = 0;
2933         u32 cmd_flags, offset32, len32, extra;
2934
2935         if (buf_size == 0)
2936                 return 0;
2937
2938         /* Request access to the flash interface. */
2939         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2940                 return rc;
2941
2942         /* Enable access to flash interface */
2943         bnx2_enable_nvram_access(bp);
2944
2945         len32 = buf_size;
2946         offset32 = offset;
2947         extra = 0;
2948
2949         cmd_flags = 0;
2950
2951         if (offset32 & 3) {
2952                 u8 buf[4];
2953                 u32 pre_len;
2954
2955                 offset32 &= ~3;
2956                 pre_len = 4 - (offset & 3);
2957
2958                 if (pre_len >= len32) {
2959                         pre_len = len32;
2960                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2961                                     BNX2_NVM_COMMAND_LAST;
2962                 }
2963                 else {
2964                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
2965                 }
2966
2967                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2968
2969                 if (rc)
2970                         return rc;
2971
2972                 memcpy(ret_buf, buf + (offset & 3), pre_len);
2973
2974                 offset32 += 4;
2975                 ret_buf += pre_len;
2976                 len32 -= pre_len;
2977         }
2978         if (len32 & 3) {
2979                 extra = 4 - (len32 & 3);
2980                 len32 = (len32 + 4) & ~3;
2981         }
2982
2983         if (len32 == 4) {
2984                 u8 buf[4];
2985
2986                 if (cmd_flags)
2987                         cmd_flags = BNX2_NVM_COMMAND_LAST;
2988                 else
2989                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
2990                                     BNX2_NVM_COMMAND_LAST;
2991
2992                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2993
2994                 memcpy(ret_buf, buf, 4 - extra);
2995         }
2996         else if (len32 > 0) {
2997                 u8 buf[4];
2998
2999                 /* Read the first word. */
3000                 if (cmd_flags)
3001                         cmd_flags = 0;
3002                 else
3003                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
3004
3005                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3006
3007                 /* Advance to the next dword. */
3008                 offset32 += 4;
3009                 ret_buf += 4;
3010                 len32 -= 4;
3011
3012                 while (len32 > 4 && rc == 0) {
3013                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3014
3015                         /* Advance to the next dword. */
3016                         offset32 += 4;
3017                         ret_buf += 4;
3018                         len32 -= 4;
3019                 }
3020
3021                 if (rc)
3022                         return rc;
3023
3024                 cmd_flags = BNX2_NVM_COMMAND_LAST;
3025                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3026
3027                 memcpy(ret_buf, buf, 4 - extra);
3028         }
3029
3030         /* Disable access to flash interface */
3031         bnx2_disable_nvram_access(bp);
3032
3033         bnx2_release_nvram_lock(bp);
3034
3035         return rc;
3036 }
3037
3038 static int
3039 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3040                 int buf_size)
3041 {
3042         u32 written, offset32, len32;
3043         u8 *buf, start[4], end[4], *flash_buffer = NULL;
3044         int rc = 0;
3045         int align_start, align_end;
3046
3047         buf = data_buf;
3048         offset32 = offset;
3049         len32 = buf_size;
3050         align_start = align_end = 0;
3051
3052         if ((align_start = (offset32 & 3))) {
3053                 offset32 &= ~3;
3054                 len32 += align_start;
3055                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3056                         return rc;
3057         }
3058
3059         if (len32 & 3) {
3060                 if ((len32 > 4) || !align_start) {
3061                         align_end = 4 - (len32 & 3);
3062                         len32 += align_end;
3063                         if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3064                                 end, 4))) {
3065                                 return rc;
3066                         }
3067                 }
3068         }
3069
3070         if (align_start || align_end) {
3071                 buf = kmalloc(len32, GFP_KERNEL);
3072                 if (buf == 0)
3073                         return -ENOMEM;
3074                 if (align_start) {
3075                         memcpy(buf, start, 4);
3076                 }
3077                 if (align_end) {
3078                         memcpy(buf + len32 - 4, end, 4);
3079                 }
3080                 memcpy(buf + align_start, data_buf, buf_size);
3081         }
3082
3083         if (bp->flash_info->buffered == 0) {
3084                 flash_buffer = kmalloc(264, GFP_KERNEL);
3085                 if (flash_buffer == NULL) {
3086                         rc = -ENOMEM;
3087                         goto nvram_write_end;
3088                 }
3089         }
3090
3091         written = 0;
3092         while ((written < len32) && (rc == 0)) {
3093                 u32 page_start, page_end, data_start, data_end;
3094                 u32 addr, cmd_flags;
3095                 int i;
3096
3097                 /* Find the page_start addr */
3098                 page_start = offset32 + written;
3099                 page_start -= (page_start % bp->flash_info->page_size);
3100                 /* Find the page_end addr */
3101                 page_end = page_start + bp->flash_info->page_size;
3102                 /* Find the data_start addr */
3103                 data_start = (written == 0) ? offset32 : page_start;
3104                 /* Find the data_end addr */
3105                 data_end = (page_end > offset32 + len32) ?
3106                         (offset32 + len32) : page_end;
3107
3108                 /* Request access to the flash interface. */
3109                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3110                         goto nvram_write_end;
3111
3112                 /* Enable access to flash interface */
3113                 bnx2_enable_nvram_access(bp);
3114
3115                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3116                 if (bp->flash_info->buffered == 0) {
3117                         int j;
3118
3119                         /* Read the whole page into the buffer
3120                          * (non-buffer flash only) */
3121                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
3122                                 if (j == (bp->flash_info->page_size - 4)) {
3123                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
3124                                 }
3125                                 rc = bnx2_nvram_read_dword(bp,
3126                                         page_start + j,
3127                                         &flash_buffer[j],
3128                                         cmd_flags);
3129
3130                                 if (rc)
3131                                         goto nvram_write_end;
3132
3133                                 cmd_flags = 0;
3134                         }
3135                 }
3136
3137                 /* Enable writes to flash interface (unlock write-protect) */
3138                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3139                         goto nvram_write_end;
3140
3141                 /* Erase the page */
3142                 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3143                         goto nvram_write_end;
3144
3145                 /* Re-enable the write again for the actual write */
3146                 bnx2_enable_nvram_write(bp);
3147
3148                 /* Loop to write back the buffer data from page_start to
3149                  * data_start */
3150                 i = 0;
3151                 if (bp->flash_info->buffered == 0) {
3152                         for (addr = page_start; addr < data_start;
3153                                 addr += 4, i += 4) {
3154
3155                                 rc = bnx2_nvram_write_dword(bp, addr,
3156                                         &flash_buffer[i], cmd_flags);
3157
3158                                 if (rc != 0)
3159                                         goto nvram_write_end;
3160
3161                                 cmd_flags = 0;
3162                         }
3163                 }
3164
3165                 /* Loop to write the new data from data_start to data_end */
3166                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3167                         if ((addr == page_end - 4) ||
3168                                 ((bp->flash_info->buffered) &&
3169                                  (addr == data_end - 4))) {
3170
3171                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3172                         }
3173                         rc = bnx2_nvram_write_dword(bp, addr, buf,
3174                                 cmd_flags);
3175
3176                         if (rc != 0)
3177                                 goto nvram_write_end;
3178
3179                         cmd_flags = 0;
3180                         buf += 4;
3181                 }
3182
3183                 /* Loop to write back the buffer data from data_end
3184                  * to page_end */
3185                 if (bp->flash_info->buffered == 0) {
3186                         for (addr = data_end; addr < page_end;
3187                                 addr += 4, i += 4) {
3188
3189                                 if (addr == page_end-4) {
3190                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
3191                                 }
3192                                 rc = bnx2_nvram_write_dword(bp, addr,
3193                                         &flash_buffer[i], cmd_flags);
3194
3195                                 if (rc != 0)
3196                                         goto nvram_write_end;
3197
3198                                 cmd_flags = 0;
3199                         }
3200                 }
3201
3202                 /* Disable writes to flash interface (lock write-protect) */
3203                 bnx2_disable_nvram_write(bp);
3204
3205                 /* Disable access to flash interface */
3206                 bnx2_disable_nvram_access(bp);
3207                 bnx2_release_nvram_lock(bp);
3208
3209                 /* Increment written */
3210                 written += data_end - data_start;
3211         }
3212
3213 nvram_write_end:
3214         if (bp->flash_info->buffered == 0)
3215                 kfree(flash_buffer);
3216
3217         if (align_start || align_end)
3218                 kfree(buf);
3219         return rc;
3220 }
3221
3222 static int
3223 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3224 {
3225         u32 val;
3226         int i, rc = 0;
3227
3228         /* Wait for the current PCI transaction to complete before
3229          * issuing a reset. */
3230         REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3231                BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3232                BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3233                BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3234                BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3235         val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3236         udelay(5);
3237
3238         /* Wait for the firmware to tell us it is ok to issue a reset. */
3239         bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3240
3241         /* Deposit a driver reset signature so the firmware knows that
3242          * this is a soft reset. */
3243         REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3244                    BNX2_DRV_RESET_SIGNATURE_MAGIC);
3245
3246         /* Do a dummy read to force the chip to complete all current transaction
3247          * before we issue a reset. */
3248         val = REG_RD(bp, BNX2_MISC_ID);
3249
3250         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3251                 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3252                 REG_RD(bp, BNX2_MISC_COMMAND);
3253                 udelay(5);
3254
3255                 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3256                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3257
3258                 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3259
3260         } else {
3261                 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3262                       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3263                       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3264
3265                 /* Chip reset. */
3266                 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3267
3268                 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3269                     (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3270                         current->state = TASK_UNINTERRUPTIBLE;
3271                         schedule_timeout(HZ / 50);
3272                 }
3273
3274                 /* Reset takes approximate 30 usec */
3275                 for (i = 0; i < 10; i++) {
3276                         val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3277                         if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3278                                     BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3279                                 break;
3280                         udelay(10);
3281                 }
3282
3283                 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3284                            BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3285                         printk(KERN_ERR PFX "Chip reset did not complete\n");
3286                         return -EBUSY;
3287                 }
3288         }
3289
3290         /* Make sure byte swapping is properly configured. */
3291         val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3292         if (val != 0x01020304) {
3293                 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3294                 return -ENODEV;
3295         }
3296
3297         /* Wait for the firmware to finish its initialization. */
3298         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3299         if (rc)
3300                 return rc;
3301
3302         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3303                 /* Adjust the voltage regular to two steps lower.  The default
3304                  * of this register is 0x0000000e. */
3305                 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3306
3307                 /* Remove bad rbuf memory from the free pool. */
3308                 rc = bnx2_alloc_bad_rbuf(bp);
3309         }
3310
3311         return rc;
3312 }
3313
3314 static int
3315 bnx2_init_chip(struct bnx2 *bp)
3316 {
3317         u32 val;
3318         int rc;
3319
3320         /* Make sure the interrupt is not active. */
3321         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3322
3323         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3324               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3325 #ifdef __BIG_ENDIAN
3326               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3327 #endif
3328               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3329               DMA_READ_CHANS << 12 |
3330               DMA_WRITE_CHANS << 16;
3331
3332         val |= (0x2 << 20) | (1 << 11);
3333
3334         if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3335                 val |= (1 << 23);
3336
3337         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3338             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3339                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3340
3341         REG_WR(bp, BNX2_DMA_CONFIG, val);
3342
3343         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3344                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3345                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3346                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3347         }
3348
3349         if (bp->flags & PCIX_FLAG) {
3350                 u16 val16;
3351
3352                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3353                                      &val16);
3354                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3355                                       val16 & ~PCI_X_CMD_ERO);
3356         }
3357
3358         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3359                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3360                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3361                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3362
3363         /* Initialize context mapping and zero out the quick contexts.  The
3364          * context block must have already been enabled. */
3365         if (CHIP_NUM(bp) == CHIP_NUM_5709)
3366                 bnx2_init_5709_context(bp);
3367         else
3368                 bnx2_init_context(bp);
3369
3370         if ((rc = bnx2_init_cpus(bp)) != 0)
3371                 return rc;
3372
3373         bnx2_init_nvram(bp);
3374
3375         bnx2_set_mac_addr(bp);
3376
3377         val = REG_RD(bp, BNX2_MQ_CONFIG);
3378         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3379         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3380         REG_WR(bp, BNX2_MQ_CONFIG, val);
3381
3382         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3383         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3384         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3385
3386         val = (BCM_PAGE_BITS - 8) << 24;
3387         REG_WR(bp, BNX2_RV2P_CONFIG, val);
3388
3389         /* Configure page size. */
3390         val = REG_RD(bp, BNX2_TBDR_CONFIG);
3391         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3392         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3393         REG_WR(bp, BNX2_TBDR_CONFIG, val);
3394
3395         val = bp->mac_addr[0] +
3396               (bp->mac_addr[1] << 8) +
3397               (bp->mac_addr[2] << 16) +
3398               bp->mac_addr[3] +
3399               (bp->mac_addr[4] << 8) +
3400               (bp->mac_addr[5] << 16);
3401         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3402
3403         /* Program the MTU.  Also include 4 bytes for CRC32. */
3404         val = bp->dev->mtu + ETH_HLEN + 4;
3405         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3406                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3407         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3408
3409         bp->last_status_idx = 0;
3410         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3411
3412         /* Set up how to generate a link change interrupt. */
3413         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3414
3415         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3416                (u64) bp->status_blk_mapping & 0xffffffff);
3417         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3418
3419         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3420                (u64) bp->stats_blk_mapping & 0xffffffff);
3421         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3422                (u64) bp->stats_blk_mapping >> 32);
3423
3424         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3425                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3426
3427         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3428                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3429
3430         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3431                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3432
3433         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3434
3435         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3436
3437         REG_WR(bp, BNX2_HC_COM_TICKS,
3438                (bp->com_ticks_int << 16) | bp->com_ticks);
3439
3440         REG_WR(bp, BNX2_HC_CMD_TICKS,
3441                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3442
3443         REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3444         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
3445
3446         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3447                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3448         else {
3449                 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3450                        BNX2_HC_CONFIG_TX_TMR_MODE |
3451                        BNX2_HC_CONFIG_COLLECT_STATS);
3452         }
3453
3454         /* Clear internal stats counters. */
3455         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3456
3457         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3458
3459         if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3460             BNX2_PORT_FEATURE_ASF_ENABLED)
3461                 bp->flags |= ASF_ENABLE_FLAG;
3462
3463         /* Initialize the receive filter. */
3464         bnx2_set_rx_mode(bp->dev);
3465
3466         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3467                           0);
3468
3469         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3470         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3471
3472         udelay(20);
3473
3474         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3475
3476         return rc;
3477 }
3478
3479 static void
3480 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3481 {
3482         u32 val, offset0, offset1, offset2, offset3;
3483
3484         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3485                 offset0 = BNX2_L2CTX_TYPE_XI;
3486                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3487                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3488                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3489         } else {
3490                 offset0 = BNX2_L2CTX_TYPE;
3491                 offset1 = BNX2_L2CTX_CMD_TYPE;
3492                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3493                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3494         }
3495         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3496         CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3497
3498         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3499         CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3500
3501         val = (u64) bp->tx_desc_mapping >> 32;
3502         CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3503
3504         val = (u64) bp->tx_desc_mapping & 0xffffffff;
3505         CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3506 }
3507
3508 static void
3509 bnx2_init_tx_ring(struct bnx2 *bp)
3510 {
3511         struct tx_bd *txbd;
3512         u32 cid;
3513
3514         bp->tx_wake_thresh = bp->tx_ring_size / 2;
3515
3516         txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3517
3518         txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3519         txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3520
3521         bp->tx_prod = 0;
3522         bp->tx_cons = 0;
3523         bp->hw_tx_cons = 0;
3524         bp->tx_prod_bseq = 0;
3525
3526         cid = TX_CID;
3527         bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3528         bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3529
3530         bnx2_init_tx_context(bp, cid);
3531 }
3532
/* Reset the RX ring state, chain the RX BD pages into a circular
 * list, program the RX L2 context, and post fresh skbs to the ring.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
        struct rx_bd *rxbd;
        int i;
        u16 prod, ring_prod;
        u32 val;

        /* 8 for CRC and VLAN */
        bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
        /* hw alignment */
        bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

        ring_prod = prod = bp->rx_prod = 0;
        bp->rx_cons = 0;
        bp->hw_rx_cons = 0;
        bp->rx_prod_bseq = 0;

        /* Initialize every BD in each ring page; the BD one past
         * MAX_RX_DESC_CNT - 1 is used below as the chain pointer to
         * the next page. */
        for (i = 0; i < bp->rx_max_ring; i++) {
                int j;

                rxbd = &bp->rx_desc_ring[i][0];
                for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
                        rxbd->rx_bd_len = bp->rx_buf_use_size;
                        rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
                }
                /* Last page chains back to page 0, closing the ring. */
                if (i == (bp->rx_max_ring - 1))
                        j = 0;
                else
                        j = i + 1;
                rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
                rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
                                       0xffffffff;
        }

        /* Program the RX context: BD-chain type plus the address of
         * the first BD page. */
        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

        val = (u64) bp->rx_desc_mapping[0] >> 32;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
        CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Post RX buffers up to the configured ring size; stop early
         * (without failing) if allocation runs out. */
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        bp->rx_prod = prod;

        /* Hand the new producer index and byte sequence to the chip. */
        REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

        REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3592
3593 static void
3594 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3595 {
3596         u32 num_rings, max;
3597
3598         bp->rx_ring_size = size;
3599         num_rings = 1;
3600         while (size > MAX_RX_DESC_CNT) {
3601                 size -= MAX_RX_DESC_CNT;
3602                 num_rings++;
3603         }
3604         /* round to next power of 2 */
3605         max = MAX_RX_RINGS;
3606         while ((max & num_rings) == 0)
3607                 max >>= 1;
3608
3609         if (num_rings != max)
3610                 max <<= 1;
3611
3612         bp->rx_max_ring = max;
3613         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3614 }
3615
3616 static void
3617 bnx2_free_tx_skbs(struct bnx2 *bp)
3618 {
3619         int i;
3620
3621         if (bp->tx_buf_ring == NULL)
3622                 return;
3623
3624         for (i = 0; i < TX_DESC_CNT; ) {
3625                 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3626                 struct sk_buff *skb = tx_buf->skb;
3627                 int j, last;
3628
3629                 if (skb == NULL) {
3630                         i++;
3631                         continue;
3632                 }
3633
3634                 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3635                         skb_headlen(skb), PCI_DMA_TODEVICE);
3636
3637                 tx_buf->skb = NULL;
3638
3639                 last = skb_shinfo(skb)->nr_frags;
3640                 for (j = 0; j < last; j++) {
3641                         tx_buf = &bp->tx_buf_ring[i + j + 1];
3642                         pci_unmap_page(bp->pdev,
3643                                 pci_unmap_addr(tx_buf, mapping),
3644                                 skb_shinfo(skb)->frags[j].size,
3645                                 PCI_DMA_TODEVICE);
3646                 }
3647                 dev_kfree_skb(skb);
3648                 i += j + 1;
3649         }
3650
3651 }
3652
3653 static void
3654 bnx2_free_rx_skbs(struct bnx2 *bp)
3655 {
3656         int i;
3657
3658         if (bp->rx_buf_ring == NULL)
3659                 return;
3660
3661         for (i = 0; i < bp->rx_max_ring_idx; i++) {
3662                 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3663                 struct sk_buff *skb = rx_buf->skb;
3664
3665                 if (skb == NULL)
3666                         continue;
3667
3668                 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3669                         bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3670
3671                 rx_buf->skb = NULL;
3672
3673                 dev_kfree_skb(skb);
3674         }
3675 }
3676
/* Free all TX and RX skbs still owned by the driver (used around
 * chip resets and teardown). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
3683
3684 static int
3685 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3686 {
3687         int rc;
3688
3689         rc = bnx2_reset_chip(bp, reset_code);
3690         bnx2_free_skbs(bp);
3691         if (rc)
3692                 return rc;
3693
3694         if ((rc = bnx2_init_chip(bp)) != 0)
3695                 return rc;
3696
3697         bnx2_init_tx_ring(bp);
3698         bnx2_init_rx_ring(bp);
3699         return 0;
3700 }
3701
3702 static int
3703 bnx2_init_nic(struct bnx2 *bp)
3704 {
3705         int rc;
3706
3707         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3708                 return rc;
3709
3710         spin_lock_bh(&bp->phy_lock);
3711         bnx2_init_phy(bp);
3712         spin_unlock_bh(&bp->phy_lock);
3713         bnx2_set_link(bp);
3714         return 0;
3715 }
3716
/* Self-test: walk a table of device registers.  For each entry,
 * write all-zeros and all-ones and verify that the writable bits
 * respond while the read-only bits keep their original value.  The
 * original register contents are restored in every case.
 *
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
        int ret;
        int i;
        /* offset:  register offset within BAR0.
         * flags:   not consulted by this test (table layout only).
         * rw_mask: bits expected to be read/write.
         * ro_mask: bits expected to be read-only.
         * The 0xffff offset entry terminates the table. */
        static const struct {
                u16   offset;
                u16   flags;
                u32   rw_mask;
                u32   ro_mask;
        } reg_tbl[] = {
                { 0x006c, 0, 0x00000000, 0x0000003f },
                { 0x0090, 0, 0xffffffff, 0x00000000 },
                { 0x0094, 0, 0x00000000, 0x00000000 },

                { 0x0404, 0, 0x00003f00, 0x00000000 },
                { 0x0418, 0, 0x00000000, 0xffffffff },
                { 0x041c, 0, 0x00000000, 0xffffffff },
                { 0x0420, 0, 0x00000000, 0x80ffffff },
                { 0x0424, 0, 0x00000000, 0x00000000 },
                { 0x0428, 0, 0x00000000, 0x00000001 },
                { 0x0450, 0, 0x00000000, 0x0000ffff },
                { 0x0454, 0, 0x00000000, 0xffffffff },
                { 0x0458, 0, 0x00000000, 0xffffffff },

                { 0x0808, 0, 0x00000000, 0xffffffff },
                { 0x0854, 0, 0x00000000, 0xffffffff },
                { 0x0868, 0, 0x00000000, 0x77777777 },
                { 0x086c, 0, 0x00000000, 0x77777777 },
                { 0x0870, 0, 0x00000000, 0x77777777 },
                { 0x0874, 0, 0x00000000, 0x77777777 },

                { 0x0c00, 0, 0x00000000, 0x00000001 },
                { 0x0c04, 0, 0x00000000, 0x03ff0001 },
                { 0x0c08, 0, 0x0f0ff073, 0x00000000 },

                { 0x1000, 0, 0x00000000, 0x00000001 },
                { 0x1004, 0, 0x00000000, 0x000f0001 },

                { 0x1408, 0, 0x01c00800, 0x00000000 },
                { 0x149c, 0, 0x8000ffff, 0x00000000 },
                { 0x14a8, 0, 0x00000000, 0x000001ff },
                { 0x14ac, 0, 0x0fffffff, 0x10000000 },
                { 0x14b0, 0, 0x00000002, 0x00000001 },
                { 0x14b8, 0, 0x00000000, 0x00000000 },
                { 0x14c0, 0, 0x00000000, 0x00000009 },
                { 0x14c4, 0, 0x00003fff, 0x00000000 },
                { 0x14cc, 0, 0x00000000, 0x00000001 },
                { 0x14d0, 0, 0xffffffff, 0x00000000 },

                { 0x1800, 0, 0x00000000, 0x00000001 },
                { 0x1804, 0, 0x00000000, 0x00000003 },

                { 0x2800, 0, 0x00000000, 0x00000001 },
                { 0x2804, 0, 0x00000000, 0x00003f01 },
                { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
                { 0x2810, 0, 0xffff0000, 0x00000000 },
                { 0x2814, 0, 0xffff0000, 0x00000000 },
                { 0x2818, 0, 0xffff0000, 0x00000000 },
                { 0x281c, 0, 0xffff0000, 0x00000000 },
                { 0x2834, 0, 0xffffffff, 0x00000000 },
                { 0x2840, 0, 0x00000000, 0xffffffff },
                { 0x2844, 0, 0x00000000, 0xffffffff },
                { 0x2848, 0, 0xffffffff, 0x00000000 },
                { 0x284c, 0, 0xf800f800, 0x07ff07ff },

                { 0x2c00, 0, 0x00000000, 0x00000011 },
                { 0x2c04, 0, 0x00000000, 0x00030007 },

                { 0x3c00, 0, 0x00000000, 0x00000001 },
                { 0x3c04, 0, 0x00000000, 0x00070000 },
                { 0x3c08, 0, 0x00007f71, 0x07f00000 },
                { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
                { 0x3c10, 0, 0xffffffff, 0x00000000 },
                { 0x3c14, 0, 0x00000000, 0xffffffff },
                { 0x3c18, 0, 0x00000000, 0xffffffff },
                { 0x3c1c, 0, 0xfffff000, 0x00000000 },
                { 0x3c20, 0, 0xffffff00, 0x00000000 },

                { 0x5004, 0, 0x00000000, 0x0000007f },
                { 0x5008, 0, 0x0f0007ff, 0x00000000 },
                { 0x500c, 0, 0xf800f800, 0x07ff07ff },

                { 0x5c00, 0, 0x00000000, 0x00000001 },
                { 0x5c04, 0, 0x00000000, 0x0003000f },
                { 0x5c08, 0, 0x00000003, 0x00000000 },
                { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
                { 0x5c10, 0, 0x00000000, 0xffffffff },
                { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
                { 0x5c84, 0, 0x00000000, 0x0000f333 },
                { 0x5c88, 0, 0x00000000, 0x00077373 },
                { 0x5c8c, 0, 0x00000000, 0x0007f737 },

                { 0x6808, 0, 0x0000ff7f, 0x00000000 },
                { 0x680c, 0, 0xffffffff, 0x00000000 },
                { 0x6810, 0, 0xffffffff, 0x00000000 },
                { 0x6814, 0, 0xffffffff, 0x00000000 },
                { 0x6818, 0, 0xffffffff, 0x00000000 },
                { 0x681c, 0, 0xffffffff, 0x00000000 },
                { 0x6820, 0, 0x00ff00ff, 0x00000000 },
                { 0x6824, 0, 0x00ff00ff, 0x00000000 },
                { 0x6828, 0, 0x00ff00ff, 0x00000000 },
                { 0x682c, 0, 0x03ff03ff, 0x00000000 },
                { 0x6830, 0, 0x03ff03ff, 0x00000000 },
                { 0x6834, 0, 0x03ff03ff, 0x00000000 },
                { 0x6838, 0, 0x03ff03ff, 0x00000000 },
                { 0x683c, 0, 0x0000ffff, 0x00000000 },
                { 0x6840, 0, 0x00000ff0, 0x00000000 },
                { 0x6844, 0, 0x00ffff00, 0x00000000 },
                { 0x684c, 0, 0xffffffff, 0x00000000 },
                { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
                { 0x6908, 0, 0x00000000, 0x0001ff0f },
                { 0x690c, 0, 0x00000000, 0x0ffe00f0 },

                { 0xffff, 0, 0x00000000, 0x00000000 },
        };

        ret = 0;
        for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
                u32 offset, rw_mask, ro_mask, save_val, val;

                offset = (u32) reg_tbl[i].offset;
                rw_mask = reg_tbl[i].rw_mask;
                ro_mask = reg_tbl[i].ro_mask;

                save_val = readl(bp->regview + offset);

                /* Write zero: no writable bit may remain set, and the
                 * read-only bits must keep their saved value. */
                writel(0, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != 0) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Write all-ones: every writable bit must read back
                 * set, read-only bits still unchanged. */
                writel(0xffffffff, bp->regview + offset);

                val = readl(bp->regview + offset);
                if ((val & rw_mask) != rw_mask) {
                        goto reg_test_err;
                }

                if ((val & ro_mask) != (save_val & ro_mask)) {
                        goto reg_test_err;
                }

                /* Restore the original register contents. */
                writel(save_val, bp->regview + offset);
                continue;

reg_test_err:
                /* Restore even on failure before reporting. */
                writel(save_val, bp->regview + offset);
                ret = -ENODEV;
                break;
        }
        return ret;
}
3879
3880 static int
3881 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3882 {
3883         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3884                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3885         int i;
3886
3887         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3888                 u32 offset;
3889
3890                 for (offset = 0; offset < size; offset += 4) {
3891
3892                         REG_WR_IND(bp, start + offset, test_pattern[i]);
3893
3894                         if (REG_RD_IND(bp, start + offset) !=
3895                                 test_pattern[i]) {
3896                                 return -ENODEV;
3897                         }
3898                 }
3899         }
3900         return 0;
3901 }
3902
3903 static int
3904 bnx2_test_memory(struct bnx2 *bp)
3905 {
3906         int ret = 0;
3907         int i;
3908         static const struct {
3909                 u32   offset;
3910                 u32   len;
3911         } mem_tbl[] = {
3912                 { 0x60000,  0x4000 },
3913                 { 0xa0000,  0x3000 },
3914                 { 0xe0000,  0x4000 },
3915                 { 0x120000, 0x4000 },
3916                 { 0x1a0000, 0x4000 },
3917                 { 0x160000, 0x4000 },
3918                 { 0xffffffff, 0    },
3919         };
3920
3921         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3922                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3923                         mem_tbl[i].len)) != 0) {
3924                         return ret;
3925                 }
3926         }
3927
3928         return ret;
3929 }
3930
3931 #define BNX2_MAC_LOOPBACK       0
3932 #define BNX2_PHY_LOOPBACK       1
3933
/* Send one self-addressed test frame in MAC or PHY loopback mode and
 * verify it returns intact on the RX ring.
 *
 * @loopback_mode: BNX2_MAC_LOOPBACK or BNX2_PHY_LOOPBACK.
 *
 * Returns 0 on success, -EINVAL on an unknown mode, -ENOMEM if the
 * test skb cannot be allocated, -ENODEV on any mismatch.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;

        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build a max-size frame addressed to our own MAC with a
         * known repeating byte pattern in the payload. */
        pkt_size = 1514;
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->mac_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);

        /* Force a status block update (without an interrupt) so we
         * get a stable RX consumer index to compare against later. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

        num_pkts = 0;

        /* Post one TX BD describing the entire frame. */
        txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
        bp->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell (index + byte sequence). */
        REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
        REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

        udelay(100);

        /* Force another status block update to pick up the TX
         * completion and the looped-back RX frame. */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The TX consumer must have caught up with the producer. */
        if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
                goto loopback_test_done;
        }

        /* Exactly num_pkts frames must have been received. */
        rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &bp->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        /* The hardware-written l2_fhdr precedes the frame data in the
         * RX buffer. */
        rx_hdr = (struct l2_fhdr *) rx_skb->data;
        skb_reserve(rx_skb, bp->rx_offset);

        pci_dma_sync_single_for_cpu(bp->pdev,
                pci_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Reject the frame if hardware flagged any receive error. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: the received length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte for byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
4052
4053 #define BNX2_MAC_LOOPBACK_FAILED        1
4054 #define BNX2_PHY_LOOPBACK_FAILED        2
4055 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
4056                                          BNX2_PHY_LOOPBACK_FAILED)
4057
4058 static int
4059 bnx2_test_loopback(struct bnx2 *bp)
4060 {
4061         int rc = 0;
4062
4063         if (!netif_running(bp->dev))
4064                 return BNX2_LOOPBACK_FAILED;
4065
4066         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4067         spin_lock_bh(&bp->phy_lock);
4068         bnx2_init_phy(bp);
4069         spin_unlock_bh(&bp->phy_lock);
4070         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4071                 rc |= BNX2_MAC_LOOPBACK_FAILED;
4072         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4073                 rc |= BNX2_PHY_LOOPBACK_FAILED;
4074         return rc;
4075 }
4076
4077 #define NVRAM_SIZE 0x200
4078 #define CRC32_RESIDUAL 0xdebb20e3
4079
4080 static int
4081 bnx2_test_nvram(struct bnx2 *bp)
4082 {
4083         u32 buf[NVRAM_SIZE / 4];
4084         u8 *data = (u8 *) buf;
4085         int rc = 0;
4086         u32 magic, csum;
4087
4088         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4089                 goto test_nvram_done;
4090
4091         magic = be32_to_cpu(buf[0]);
4092         if (magic != 0x669955aa) {
4093                 rc = -ENODEV;
4094                 goto test_nvram_done;
4095         }
4096
4097         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4098                 goto test_nvram_done;
4099
4100         csum = ether_crc_le(0x100, data);
4101         if (csum != CRC32_RESIDUAL) {
4102                 rc = -ENODEV;
4103                 goto test_nvram_done;
4104         }
4105
4106         csum = ether_crc_le(0x100, data + 0x100);
4107         if (csum != CRC32_RESIDUAL) {
4108                 rc = -ENODEV;
4109         }
4110
4111 test_nvram_done:
4112         return rc;
4113 }
4114
4115 static int
4116 bnx2_test_link(struct bnx2 *bp)
4117 {
4118         u32 bmsr;
4119
4120         spin_lock_bh(&bp->phy_lock);
4121         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4122         bnx2_read_phy(bp, MII_BMSR, &bmsr);
4123         spin_unlock_bh(&bp->phy_lock);
4124
4125         if (bmsr & BMSR_LSTATUS) {
4126                 return 0;
4127         }
4128         return -ENODEV;
4129 }
4130
4131 static int
4132 bnx2_test_intr(struct bnx2 *bp)
4133 {
4134         int i;
4135         u16 status_idx;
4136
4137         if (!netif_running(bp->dev))
4138                 return -ENODEV;
4139
4140         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4141
4142         /* This register is not touched during run-time. */
4143         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4144         REG_RD(bp, BNX2_HC_COMMAND);
4145
4146         for (i = 0; i < 10; i++) {
4147                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4148                         status_idx) {
4149
4150                         break;
4151                 }
4152
4153                 msleep_interruptible(10);
4154         }
4155         if (i < 10)
4156                 return 0;
4157
4158         return -ENODEV;
4159 }
4160
/* Periodic 5706 SerDes link state machine (called from bnx2_timer).
 *
 * While link is down with autoneg enabled, probe the PHY for signal
 * detect without incoming config words and, if seen, force 1G full
 * duplex ("parallel detect").  While link is up in parallel-detect
 * mode, return to autoneg once the partner starts sending config
 * words.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = bp->timer_interval;

                bnx2_read_phy(bp, MII_BMCR, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        u32 phy1, phy2;

                        /* 0x1c/0x17/0x15 are vendor-specific shadow
                         * registers on this SerDes PHY. */
                        bnx2_write_phy(bp, 0x1c, 0x7c00);
                        bnx2_read_phy(bp, 0x1c, &phy1);

                        /* Read twice — NOTE(review): presumably the
                         * status is latched so the second read gives
                         * the current value; confirm against the PHY
                         * data sheet. */
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);
                        bnx2_write_phy(bp, 0x17, 0x0f01);
                        bnx2_read_phy(bp, 0x15, &phy2);

                        if ((phy1 & 0x10) &&    /* SIGNAL DETECT */
                                !(phy2 & 0x20)) {       /* no CONFIG */

                                /* Partner is present but not sending
                                 * config words: force 1G full duplex. */
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, MII_BMCR, bmcr);
                                bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
                u32 phy2;

                /* If the partner now sends CONFIG words, re-enable
                 * autonegotiation. */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, MII_BMCR, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, MII_BMCR, bmcr);

                        bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
                }
        } else
                bp->current_interval = bp->timer_interval;

        spin_unlock(&bp->phy_lock);
}
4215
4216 static void
4217 bnx2_5708_serdes_timer(struct bnx2 *bp)
4218 {
4219         if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4220                 bp->serdes_an_pending = 0;
4221                 return;
4222         }
4223
4224         spin_lock(&bp->phy_lock);
4225         if (bp->serdes_an_pending)
4226                 bp->serdes_an_pending--;
4227         else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4228                 u32 bmcr;
4229
4230                 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4231
4232                 if (bmcr & BMCR_ANENABLE) {
4233                         bmcr &= ~BMCR_ANENABLE;
4234                         bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4235                         bnx2_write_phy(bp, MII_BMCR, bmcr);
4236                         bp->current_interval = SERDES_FORCED_TIMEOUT;
4237                 } else {
4238                         bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4239                         bmcr |= BMCR_ANENABLE;
4240                         bnx2_write_phy(bp, MII_BMCR, bmcr);
4241                         bp->serdes_an_pending = 2;
4242                         bp->current_interval = bp->timer_interval;
4243                 }
4244
4245         } else
4246                 bp->current_interval = bp->timer_interval;
4247
4248         spin_unlock(&bp->phy_lock);
4249 }
4250
/* Periodic driver timer.  Sends the driver heartbeat to the bootcode,
 * refreshes the firmware rx-drop counter in the stats block, and runs
 * the per-chip SerDes state machines.  Re-arms itself with the current
 * interval on every invocation while the interface is running.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;
        u32 msg;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts are held off (intr_sem raised, e.g. during reset):
         * skip the work but keep the timer alive.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Heartbeat pulse so the firmware knows the driver is alive. */
        msg = (u32) ++bp->fw_drv_pulse_wr_seq;
        REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

        bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

        if (bp->phy_flags & PHY_SERDES_FLAG) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4278
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        rc = bnx2_alloc_mem(bp);
        if (rc)
                return rc;

        /* MSI is avoided on 5706 A0/A1 and when the disable_msi module
         * parameter is set; otherwise try MSI first and fall back to a
         * shared INTx handler if pci_enable_msi() fails.
         */
        if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
                (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
                !disable_msi) {

                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= USING_MSI_FLAG;
                        rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
                                        dev);
                }
                else {
                        rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                }
        }
        else {
                rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
                                dev->name, dev);
        }
        if (rc) {
                bnx2_free_mem(bp);
                return rc;
        }

        rc = bnx2_init_nic(bp);

        if (rc) {
                /* Unwind everything acquired above. */
                free_irq(bp->pdev->irq, dev);
                if (bp->flags & USING_MSI_FLAG) {
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;
                }
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
                return rc;
        }

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & USING_MSI_FLAG) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        free_irq(bp->pdev->irq, dev);
                        pci_disable_msi(bp->pdev);
                        bp->flags &= ~USING_MSI_FLAG;

                        /* NOTE(review): NIC is re-initialized after the
                         * failed MSI test, presumably to restore a clean
                         * interrupt state -- confirm against bnx2_test_intr().
                         */
                        rc = bnx2_init_nic(bp);

                        if (!rc) {
                                rc = request_irq(bp->pdev->irq, bnx2_interrupt,
                                        IRQF_SHARED, dev->name, dev);
                        }
                        if (rc) {
                                bnx2_free_skbs(bp);
                                bnx2_free_mem(bp);
                                del_timer_sync(&bp->timer);
                                return rc;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & USING_MSI_FLAG) {
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        }

        netif_start_queue(dev);

        return 0;
}
4374
/* Work handler scheduled by bnx2_tx_timeout().  Stops the interface,
 * re-initializes the NIC, and restarts it.  The in_reset_task flag lets
 * bnx2_close() busy-wait for this to finish instead of calling
 * flush_scheduled_work() (see the deadlock comment in bnx2_close()).
 */
static void
bnx2_reset_task(void *data)
{
        struct bnx2 *bp = data;

        if (!netif_running(bp->dev))
                return;

        bp->in_reset_task = 1;
        bnx2_netif_stop(bp);

        bnx2_init_nic(bp);

        /* NOTE(review): intr_sem is raised here; presumably cleared when
         * bnx2_netif_start() re-enables interrupts -- confirm.
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
        bp->in_reset_task = 0;
}
4392
/* net_device tx_timeout handler.  Defers the reset to process context
 * via the reset_task work item.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
4401
4402 #ifdef BCM_VLAN
4403 /* Called with rtnl_lock */
/* Called with rtnl_lock.  Installs the new VLAN group and reprograms
 * the rx mode with the device quiesced.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4416
4417 /* Called with rtnl_lock */
/* Called with rtnl_lock.  Removes a VLAN id from the group and
 * reprograms the rx mode with the device quiesced.
 */
static void
bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
{
        struct bnx2 *bp = netdev_priv(dev);

        bnx2_netif_stop(bp);

        if (bp->vlgrp)
                bp->vlgrp->vlan_devices[vid] = NULL;
        bnx2_set_rx_mode(dev);

        bnx2_netif_start(bp);
}
4431 #endif
4432
4433 /* Called with netif_tx_lock.
4434  * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4435  * netif_wake_queue().
4436  */
4437 static int
4438 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4439 {
4440         struct bnx2 *bp = netdev_priv(dev);
4441         dma_addr_t mapping;
4442         struct tx_bd *txbd;
4443         struct sw_bd *tx_buf;
4444         u32 len, vlan_tag_flags, last_frag, mss;
4445         u16 prod, ring_prod;
4446         int i;
4447
4448         if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4449                 netif_stop_queue(dev);
4450                 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4451                         dev->name);
4452
4453                 return NETDEV_TX_BUSY;
4454         }
4455         len = skb_headlen(skb);
4456         prod = bp->tx_prod;
4457         ring_prod = TX_RING_IDX(prod);
4458
4459         vlan_tag_flags = 0;
4460         if (skb->ip_summed == CHECKSUM_PARTIAL) {
4461                 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4462         }
4463
4464         if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4465                 vlan_tag_flags |=
4466                         (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4467         }
4468 #ifdef BCM_TSO
4469         if ((mss = skb_shinfo(skb)->gso_size) &&
4470                 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4471                 u32 tcp_opt_len, ip_tcp_len;
4472
4473                 if (skb_header_cloned(skb) &&
4474                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4475                         dev_kfree_skb(skb);
4476                         return NETDEV_TX_OK;
4477                 }
4478
4479                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4480                 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4481
4482                 tcp_opt_len = 0;
4483                 if (skb->h.th->doff > 5) {
4484                         tcp_opt_len = (skb->h.th->doff - 5) << 2;
4485                 }
4486                 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4487
4488                 skb->nh.iph->check = 0;
4489                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4490                 skb->h.th->check =
4491                         ~csum_tcpudp_magic(skb->nh.iph->saddr,
4492                                             skb->nh.iph->daddr,
4493                                             0, IPPROTO_TCP, 0);
4494
4495                 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4496                         vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4497                                 (tcp_opt_len >> 2)) << 8;
4498                 }
4499         }
4500         else
4501 #endif
4502         {
4503                 mss = 0;
4504         }
4505
4506         mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4507
4508         tx_buf = &bp->tx_buf_ring[ring_prod];
4509         tx_buf->skb = skb;
4510         pci_unmap_addr_set(tx_buf, mapping, mapping);
4511
4512         txbd = &bp->tx_desc_ring[ring_prod];
4513
4514         txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4515         txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4516         txbd->tx_bd_mss_nbytes = len | (mss << 16);
4517         txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4518
4519         last_frag = skb_shinfo(skb)->nr_frags;
4520
4521         for (i = 0; i < last_frag; i++) {
4522                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4523
4524                 prod = NEXT_TX_BD(prod);
4525                 ring_prod = TX_RING_IDX(prod);
4526                 txbd = &bp->tx_desc_ring[ring_prod];
4527
4528                 len = frag->size;
4529                 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4530                         len, PCI_DMA_TODEVICE);
4531                 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4532                                 mapping, mapping);
4533
4534                 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4535                 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4536                 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4537                 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4538
4539         }
4540         txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4541
4542         prod = NEXT_TX_BD(prod);
4543         bp->tx_prod_bseq += skb->len;
4544
4545         REG_WR16(bp, bp->tx_bidx_addr, prod);
4546         REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4547
4548         mmiowb();
4549
4550         bp->tx_prod = prod;
4551         dev->trans_start = jiffies;
4552
4553         if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4554                 netif_stop_queue(dev);
4555                 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4556                         netif_wake_queue(dev);
4557         }
4558
4559         return NETDEV_TX_OK;
4560 }
4561
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 reset_code;

        /* Calling flush_scheduled_work() may deadlock because
         * linkwatch_event() may be on the workqueue and it will try to get
         * the rtnl_lock which we are holding.
         */
        while (bp->in_reset_task)
                msleep(1);

        bnx2_netif_stop(bp);
        del_timer_sync(&bp->timer);
        /* Tell the firmware why we are going down so it can configure
         * wake-on-LAN (or not) accordingly.
         */
        if (bp->flags & NO_WOL_FLAG)
                reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
        else if (bp->wol)
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
        else
                reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
        bnx2_reset_chip(bp, reset_code);
        free_irq(bp->pdev->irq, dev);
        if (bp->flags & USING_MSI_FLAG) {
                pci_disable_msi(bp->pdev);
                bp->flags &= ~USING_MSI_FLAG;
        }
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
4597
/* Combine the _hi/_lo 32-bit halves of a 64-bit hardware counter into an
 * unsigned long.  On 32-bit hosts only the low half is used.  Both
 * expansions are fully parenthesized so the macros compose safely inside
 * larger expressions (e.g. multiplied or summed terms).
 */
#define GET_NET_STATS64(ctr)                                    \
        ((unsigned long) ((unsigned long) (ctr##_hi) << 32) +   \
         (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
4610
/* net_device get_stats handler.  Translates the hardware statistics
 * block into struct net_device_stats.  If the stats block has not been
 * allocated, the previously stored counters are returned unchanged.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct statistics_block *stats_blk = bp->stats_blk;
        struct net_device_stats *net_stats = &bp->net_stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        /* GET_NET_STATS uses both 32-bit halves of a counter on 64-bit
         * hosts and only the low half on 32-bit hosts.
         */
        net_stats->rx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
                GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

        net_stats->multicast =
                GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                (unsigned long) stats_blk->stat_EtherStatsCollisions;

        net_stats->rx_length_errors =
                (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
                stats_blk->stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                (unsigned long) stats_blk->stat_IfInMBUFDiscards;

        net_stats->rx_frame_errors =
                (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

        net_stats->rx_crc_errors =
                (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
                stats_blk->stat_Dot3StatsLateCollisions);

        /* 5706 and 5708 A0 report tx_carrier_errors as zero --
         * NOTE(review): presumably a chip erratum; kept as-is.
         */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        (unsigned long)
                        stats_blk->stat_Dot3StatsCarrierSenseErrors;
        }

        net_stats->tx_errors =
                (unsigned long)
                stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
                +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
                stats_blk->stat_FwRxDrop);

        return net_stats;
}
4686
4687 /* All ethtool functions called with rtnl_lock */
4688
4689 static int
4690 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4691 {
4692         struct bnx2 *bp = netdev_priv(dev);
4693
4694         cmd->supported = SUPPORTED_Autoneg;
4695         if (bp->phy_flags & PHY_SERDES_FLAG) {
4696                 cmd->supported |= SUPPORTED_1000baseT_Full |
4697                         SUPPORTED_FIBRE;
4698
4699                 cmd->port = PORT_FIBRE;
4700         }
4701         else {
4702                 cmd->supported |= SUPPORTED_10baseT_Half |
4703                         SUPPORTED_10baseT_Full |
4704                         SUPPORTED_100baseT_Half |
4705                         SUPPORTED_100baseT_Full |
4706                         SUPPORTED_1000baseT_Full |
4707                         SUPPORTED_TP;
4708
4709                 cmd->port = PORT_TP;
4710         }
4711
4712         cmd->advertising = bp->advertising;
4713
4714         if (bp->autoneg & AUTONEG_SPEED) {
4715                 cmd->autoneg = AUTONEG_ENABLE;
4716         }
4717         else {
4718                 cmd->autoneg = AUTONEG_DISABLE;
4719         }
4720
4721         if (netif_carrier_ok(dev)) {
4722                 cmd->speed = bp->line_speed;
4723                 cmd->duplex = bp->duplex;
4724         }
4725         else {
4726                 cmd->speed = -1;
4727                 cmd->duplex = -1;
4728         }
4729
4730         cmd->transceiver = XCVR_INTERNAL;
4731         cmd->phy_address = bp->phy_addr;
4732
4733         return 0;
4734 }
4735
/* ethtool set_settings handler.  Validates the requested autoneg /
 * forced-speed combination against the PHY type (copper vs SerDes),
 * then commits the settings and reprograms the PHY.  Called with
 * rtnl_lock held.  Returns 0 or -EINVAL on an unsupported combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        /* Work on copies so nothing is committed if validation fails. */
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are not valid on SerDes. */
                        if (bp->phy_flags & PHY_SERDES_FLAG)
                                return -EINVAL;

                        advertising = cmd->advertising;

                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
                        advertising = cmd->advertising;
                }
                else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
                        /* 1000 half duplex is never supported. */
                        return -EINVAL;
                }
                else {
                        if (bp->phy_flags & PHY_SERDES_FLAG) {
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        }
                        else {
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                        }
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (bp->phy_flags & PHY_SERDES_FLAG) {
                        /* SerDes supports only 1000/2500 full duplex,
                         * and 2500 only on 2.5G-capable PHYs.
                         */
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                return -EINVAL;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
                                return -EINVAL;
                }
                else if (cmd->speed == SPEED_1000) {
                        /* Forced 1000 is not supported on copper. */
                        return -EINVAL;
                }
                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        spin_lock_bh(&bp->phy_lock);

        bnx2_setup_phy(bp);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4811
/* ethtool get_drvinfo handler.  Formats fw_version as "X.Y.Z" from the
 * three high bytes of bp->fw_ver.  NOTE(review): the '+ '0'' conversion
 * assumes each byte is in 0-9 -- confirm against the bootcode version
 * encoding.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct bnx2 *bp = netdev_priv(dev);

        strcpy(info->driver, DRV_MODULE_NAME);
        strcpy(info->version, DRV_MODULE_VERSION);
        strcpy(info->bus_info, pci_name(bp->pdev));
        info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
        info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
        info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
        info->fw_version[1] = info->fw_version[3] = '.';
        info->fw_version[5] = 0;
}
4826
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
4834
/* ethtool get_regs handler.  Dumps readable chip registers into the
 * 32KB buffer.  reg_boundaries[] is a list of alternating
 * end-of-readable-range / start-of-next-range offsets; the gaps
 * between ranges are skipped and remain zero (the buffer is cleared
 * first).  Nothing is read if the interface is down.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
                                 0x0800, 0x0880, 0x0c00, 0x0c10,
                                 0x0c30, 0x0d08, 0x1000, 0x101c,
                                 0x1040, 0x1048, 0x1080, 0x10a4,
                                 0x1400, 0x1490, 0x1498, 0x14f0,
                                 0x1500, 0x155c, 0x1580, 0x15dc,
                                 0x1600, 0x1658, 0x1680, 0x16d8,
                                 0x1800, 0x1820, 0x1840, 0x1854,
                                 0x1880, 0x1894, 0x1900, 0x1984,
                                 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                                 0x1c80, 0x1c94, 0x1d00, 0x1d84,
                                 0x2000, 0x2030, 0x23c0, 0x2400,
                                 0x2800, 0x2820, 0x2830, 0x2850,
                                 0x2b40, 0x2c10, 0x2fc0, 0x3058,
                                 0x3c00, 0x3c94, 0x4000, 0x4010,
                                 0x4080, 0x4090, 0x43c0, 0x4458,
                                 0x4c00, 0x4c18, 0x4c40, 0x4c54,
                                 0x4fc0, 0x5010, 0x53c0, 0x5444,
                                 0x5c00, 0x5c18, 0x5c80, 0x5c90,
                                 0x5fc0, 0x6000, 0x6400, 0x6428,
                                 0x6800, 0x6848, 0x684c, 0x6860,
                                 0x6888, 0x6910, 0x8000 };

        regs->version = 0;

        memset(p, 0, BNX2_REGDUMP_LEN);

        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = REG_RD(bp, offset);
                offset += 4;
                /* End of this readable range: jump to the next one and
                 * reposition the output pointer to the matching offset.
                 */
                if (offset == reg_boundaries[i + 1]) {
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
4884
4885 static void
4886 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4887 {
4888         struct bnx2 *bp = netdev_priv(dev);
4889
4890         if (bp->flags & NO_WOL_FLAG) {
4891                 wol->supported = 0;
4892                 wol->wolopts = 0;
4893         }
4894         else {
4895                 wol->supported = WAKE_MAGIC;
4896                 if (bp->wol)
4897                         wol->wolopts = WAKE_MAGIC;
4898                 else
4899                         wol->wolopts = 0;
4900         }
4901         memset(&wol->sopass, 0, sizeof(wol->sopass));
4902 }
4903
4904 static int
4905 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4906 {
4907         struct bnx2 *bp = netdev_priv(dev);
4908
4909         if (wol->wolopts & ~WAKE_MAGIC)
4910                 return -EINVAL;
4911
4912         if (wol->wolopts & WAKE_MAGIC) {
4913                 if (bp->flags & NO_WOL_FLAG)
4914                         return -EINVAL;
4915
4916                 bp->wol = 1;
4917         }
4918         else {
4919                 bp->wol = 0;
4920         }
4921         return 0;
4922 }
4923
/* ethtool nway_reset handler.  Restarts autonegotiation; only valid
 * when autoneg is enabled.  For SerDes, first forces a brief loopback
 * so the link partner sees the link drop, and arms the SerDes autoneg
 * timeout.  Called with rtnl_lock held.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Force a link down visible on the other side */
        if (bp->phy_flags & PHY_SERDES_FLAG) {
                bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
                /* Drop the lock while sleeping; reacquired below. */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                bp->current_interval = SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        bnx2_read_phy(bp, MII_BMCR, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
4958
4959 static int
4960 bnx2_get_eeprom_len(struct net_device *dev)
4961 {
4962         struct bnx2 *bp = netdev_priv(dev);
4963
4964         if (bp->flash_info == NULL)
4965                 return 0;
4966
4967         return (int) bp->flash_size;
4968 }
4969
4970 static int
4971 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4972                 u8 *eebuf)
4973 {
4974         struct bnx2 *bp = netdev_priv(dev);
4975         int rc;
4976
4977         /* parameters already validated in ethtool_get_eeprom */
4978
4979         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4980
4981         return rc;
4982 }
4983
4984 static int
4985 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4986                 u8 *eebuf)
4987 {
4988         struct bnx2 *bp = netdev_priv(dev);
4989         int rc;
4990
4991         /* parameters already validated in ethtool_set_eeprom */
4992
4993         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4994
4995         return rc;
4996 }
4997
/* ethtool get_coalesce handler.  Reports the current interrupt
 * coalescing parameters; fields not supported by this driver are left
 * zero by the initial memset.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
5019
5020 static int
5021 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5022 {
5023         struct bnx2 *bp = netdev_priv(dev);
5024
5025         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5026         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5027
5028         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5029         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5030
5031         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5032         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5033
5034         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5035         if (bp->rx_quick_cons_trip_int > 0xff)
5036                 bp->rx_quick_cons_trip_int = 0xff;
5037
5038         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5039         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5040
5041         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5042         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5043
5044         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5045         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5046
5047         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5048         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5049                 0xff;
5050
5051         bp->stats_ticks = coal->stats_block_coalesce_usecs;
5052         if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5053         bp->stats_ticks &= 0xffff00;
5054
5055         if (netif_running(bp->dev)) {
5056                 bnx2_netif_stop(bp);
5057                 bnx2_init_nic(bp);
5058                 bnx2_netif_start(bp);
5059         }
5060
5061         return 0;
5062 }
5063
/* ethtool get_ringparam handler.  Reports rx/tx ring sizes and their
 * maximums; mini and jumbo rings are not supported.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct bnx2 *bp = netdev_priv(dev);

        ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
        ering->rx_mini_max_pending = 0;
        ering->rx_jumbo_max_pending = 0;

        ering->rx_pending = bp->rx_ring_size;
        ering->rx_mini_pending = 0;
        ering->rx_jumbo_pending = 0;

        ering->tx_max_pending = MAX_TX_DESC_CNT;
        ering->tx_pending = bp->tx_ring_size;
}
5080
/* ethtool set_ringparam handler.  Validates the requested rx/tx ring
 * sizes, then (if the interface is up) tears down and rebuilds the
 * rings with the new sizes.
 *
 * NOTE(review): if bnx2_alloc_mem() fails after the teardown, the
 * function returns with the device stopped and memory freed -- confirm
 * this matches the intended error behavior.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* tx ring must be larger than MAX_SKB_FRAGS so one maximally
         * fragmented skb can always fit.
         */
        if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
                (ering->tx_pending > MAX_TX_DESC_CNT) ||
                (ering->tx_pending <= MAX_SKB_FRAGS)) {

                return -EINVAL;
        }
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, ering->rx_pending);
        bp->tx_ring_size = ering->tx_pending;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (rc)
                        return rc;
                bnx2_init_nic(bp);
                bnx2_netif_start(bp);
        }

        return 0;
}
5114
5115 static void
5116 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5117 {
5118         struct bnx2 *bp = netdev_priv(dev);
5119
5120         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5121         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5122         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5123 }
5124
5125 static int
5126 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5127 {
5128         struct bnx2 *bp = netdev_priv(dev);
5129
5130         bp->req_flow_ctrl = 0;
5131         if (epause->rx_pause)
5132                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5133         if (epause->tx_pause)
5134                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5135
5136         if (epause->autoneg) {
5137                 bp->autoneg |= AUTONEG_FLOW_CTRL;
5138         }
5139         else {
5140                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5141         }
5142
5143         spin_lock_bh(&bp->phy_lock);
5144
5145         bnx2_setup_phy(bp);
5146
5147         spin_unlock_bh(&bp->phy_lock);
5148
5149         return 0;
5150 }
5151
/* ethtool get_rx_csum handler: report whether RX checksum offload is
 * currently enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
5159
/* ethtool set_rx_csum handler: record the requested RX checksum
 * offload setting.  Always returns 0. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
5168
5169 static int
5170 bnx2_set_tso(struct net_device *dev, u32 data)
5171 {
5172         if (data)
5173                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5174         else
5175                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5176         return 0;
5177 }
5178
#define BNX2_NUM_STATS 46

/* ethtool statistics names, reported via bnx2_get_strings().  The
 * order must match bnx2_stats_offset_arr[] and the per-chip
 * *_stats_len_arr[] tables below, which are indexed in lock step. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5231
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Offset (in 32-bit words) of each counter within the DMA'ed
 * struct statistics_block, in the same order as bnx2_stats_str_arr[].
 * 8-byte counters point at their _hi word; bnx2_get_ethtool_stats()
 * reads the _lo word at offset + 1. */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5282
/* Per-counter sizes in bytes (8, 4, or 0) used for 5706 A0-A2 and
 * 5708 A0.  stat_IfHCInBadOctets (index 1) and
 * stat_Dot3StatsCarrierSenseErrors (index 11) are skipped (length 0)
 * because of errata on these chip revisions. */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5293
/* Per-counter sizes in bytes used for all other chip revisions; only
 * stat_IfHCInBadOctets (index 1) is skipped because of errata. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5301
#define BNX2_NUM_TESTS 6

/* Names of the ethtool self-tests, in the same order that
 * bnx2_self_test() fills its result buffer. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5314
/* ethtool self_test_count handler: number of entries in
 * bnx2_tests_str_arr[]. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5320
/* ethtool self_test handler.  Offline tests (registers, memory,
 * loopback) take the NIC down and reset the chip in diagnostic mode;
 * online tests (nvram, interrupt, link) run against the live device.
 * buf[] gets one result slot per test, in bnx2_tests_str_arr[] order;
 * non-zero means failure.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce the device and enter diag mode before the
		 * destructive offline tests. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the chip: full re-init if the interface is up,
		 * otherwise just a plain reset. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up, for up to ~7 seconds */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5376
5377 static void
5378 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5379 {
5380         switch (stringset) {
5381         case ETH_SS_STATS:
5382                 memcpy(buf, bnx2_stats_str_arr,
5383                         sizeof(bnx2_stats_str_arr));
5384                 break;
5385         case ETH_SS_TEST:
5386                 memcpy(buf, bnx2_tests_str_arr,
5387                         sizeof(bnx2_tests_str_arr));
5388                 break;
5389         }
5390 }
5391
/* ethtool get_stats_count handler: number of entries in
 * bnx2_stats_str_arr[]. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5397
/* ethtool get_ethtool_stats handler: copy counters from the chip's
 * statistics block into buf[], one u64 per name in
 * bnx2_stats_str_arr[].  Counters are stored as 32-bit words; 8-byte
 * counters occupy a hi/lo word pair with the _hi word first.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block allocated yet; report all zeroes. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early chip revisions have counter errata; choose the length
	 * table that zeroes the unreliable counters. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: combine the hi and lo 32-bit words */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5438
5439 static int
5440 bnx2_phys_id(struct net_device *dev, u32 data)
5441 {
5442         struct bnx2 *bp = netdev_priv(dev);
5443         int i;
5444         u32 save;
5445
5446         if (data == 0)
5447                 data = 2;
5448
5449         save = REG_RD(bp, BNX2_MISC_CFG);
5450         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5451
5452         for (i = 0; i < (data * 2); i++) {
5453                 if ((i % 2) == 0) {
5454                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5455                 }
5456                 else {
5457                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5458                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
5459                                 BNX2_EMAC_LED_100MB_OVERRIDE |
5460                                 BNX2_EMAC_LED_10MB_OVERRIDE |
5461                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5462                                 BNX2_EMAC_LED_TRAFFIC);
5463                 }
5464                 msleep_interruptible(500);
5465                 if (signal_pending(current))
5466                         break;
5467         }
5468         REG_WR(bp, BNX2_EMAC_LED, 0);
5469         REG_WR(bp, BNX2_MISC_CFG, save);
5470         return 0;
5471 }
5472
/* ethtool operations supported by this driver.  The TSO accessors are
 * only wired up when the kernel provides NETIF_F_TSO (BCM_TSO). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5510
5511 /* Called with rtnl_lock */
5512 static int
5513 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5514 {
5515         struct mii_ioctl_data *data = if_mii(ifr);
5516         struct bnx2 *bp = netdev_priv(dev);
5517         int err;
5518
5519         switch(cmd) {
5520         case SIOCGMIIPHY:
5521                 data->phy_id = bp->phy_addr;
5522
5523                 /* fallthru */
5524         case SIOCGMIIREG: {
5525                 u32 mii_regval;
5526
5527                 spin_lock_bh(&bp->phy_lock);
5528                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5529                 spin_unlock_bh(&bp->phy_lock);
5530
5531                 data->val_out = mii_regval;
5532
5533                 return err;
5534         }
5535
5536         case SIOCSMIIREG:
5537                 if (!capable(CAP_NET_ADMIN))
5538                         return -EPERM;
5539
5540                 spin_lock_bh(&bp->phy_lock);
5541                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5542                 spin_unlock_bh(&bp->phy_lock);
5543
5544                 return err;
5545
5546         default:
5547                 /* do nothing */
5548                 break;
5549         }
5550         return -EOPNOTSUPP;
5551 }
5552
5553 /* Called with rtnl_lock */
5554 static int
5555 bnx2_change_mac_addr(struct net_device *dev, void *p)
5556 {
5557         struct sockaddr *addr = p;
5558         struct bnx2 *bp = netdev_priv(dev);
5559
5560         if (!is_valid_ether_addr(addr->sa_data))
5561                 return -EINVAL;
5562
5563         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5564         if (netif_running(dev))
5565                 bnx2_set_mac_addr(bp);
5566
5567         return 0;
5568 }
5569
5570 /* Called with rtnl_lock */
5571 static int
5572 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5573 {
5574         struct bnx2 *bp = netdev_priv(dev);
5575
5576         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5577                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5578                 return -EINVAL;
5579
5580         dev->mtu = new_mtu;
5581         if (netif_running(dev)) {
5582                 bnx2_netif_stop(bp);
5583
5584                 bnx2_init_nic(bp);
5585
5586                 bnx2_netif_start(bp);
5587         }
5588         return 0;
5589 }
5590
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service the NIC with its interrupt line disabled so
 * netconsole and friends can make progress from atomic context. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	unsigned int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
5602
5603 static int __devinit
5604 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5605 {
5606         struct bnx2 *bp;
5607         unsigned long mem_len;
5608         int rc;
5609         u32 reg;
5610
5611         SET_MODULE_OWNER(dev);
5612         SET_NETDEV_DEV(dev, &pdev->dev);
5613         bp = netdev_priv(dev);
5614
5615         bp->flags = 0;
5616         bp->phy_flags = 0;
5617
5618         /* enable device (incl. PCI PM wakeup), and bus-mastering */
5619         rc = pci_enable_device(pdev);
5620         if (rc) {
5621                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5622                 goto err_out;
5623         }
5624
5625         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5626                 dev_err(&pdev->dev,
5627                         "Cannot find PCI device base address, aborting.\n");
5628                 rc = -ENODEV;
5629                 goto err_out_disable;
5630         }
5631
5632         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5633         if (rc) {
5634                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5635                 goto err_out_disable;
5636         }
5637
5638         pci_set_master(pdev);
5639
5640         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5641         if (bp->pm_cap == 0) {
5642                 dev_err(&pdev->dev,
5643                         "Cannot find power management capability, aborting.\n");
5644                 rc = -EIO;
5645                 goto err_out_release;
5646         }
5647
5648         if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5649                 bp->flags |= USING_DAC_FLAG;
5650                 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5651                         dev_err(&pdev->dev,
5652                                 "pci_set_consistent_dma_mask failed, aborting.\n");
5653                         rc = -EIO;
5654                         goto err_out_release;
5655                 }
5656         }
5657         else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5658                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5659                 rc = -EIO;
5660                 goto err_out_release;
5661         }
5662
5663         bp->dev = dev;
5664         bp->pdev = pdev;
5665
5666         spin_lock_init(&bp->phy_lock);
5667         INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5668
5669         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5670         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5671         dev->mem_end = dev->mem_start + mem_len;
5672         dev->irq = pdev->irq;
5673
5674         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5675
5676         if (!bp->regview) {
5677                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5678                 rc = -ENOMEM;
5679                 goto err_out_release;
5680         }
5681
5682         /* Configure byte swap and enable write to the reg_window registers.
5683          * Rely on CPU to do target byte swapping on big endian systems
5684          * The chip's target access swapping will not swap all accesses
5685          */
5686         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5687                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5688                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5689
5690         bnx2_set_power_state(bp, PCI_D0);
5691
5692         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5693
5694         if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5695                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5696                 if (bp->pcix_cap == 0) {
5697                         dev_err(&pdev->dev,
5698                                 "Cannot find PCIX capability, aborting.\n");
5699                         rc = -EIO;
5700                         goto err_out_unmap;
5701                 }
5702         }
5703
5704         /* Get bus information. */
5705         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5706         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5707                 u32 clkreg;
5708
5709                 bp->flags |= PCIX_FLAG;
5710
5711                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5712
5713                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5714                 switch (clkreg) {
5715                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5716                         bp->bus_speed_mhz = 133;
5717                         break;
5718
5719                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5720                         bp->bus_speed_mhz = 100;
5721                         break;
5722
5723                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5724                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5725                         bp->bus_speed_mhz = 66;
5726                         break;
5727
5728                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5729                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5730                         bp->bus_speed_mhz = 50;
5731                         break;
5732
5733                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5734                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5735                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5736                         bp->bus_speed_mhz = 33;
5737                         break;
5738                 }
5739         }
5740         else {
5741                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5742                         bp->bus_speed_mhz = 66;
5743                 else
5744                         bp->bus_speed_mhz = 33;
5745         }
5746
5747         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5748                 bp->flags |= PCI_32BIT_FLAG;
5749
5750         /* 5706A0 may falsely detect SERR and PERR. */
5751         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5752                 reg = REG_RD(bp, PCI_COMMAND);
5753                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5754                 REG_WR(bp, PCI_COMMAND, reg);
5755         }
5756         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5757                 !(bp->flags & PCIX_FLAG)) {
5758
5759                 dev_err(&pdev->dev,
5760                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
5761                 goto err_out_unmap;
5762         }
5763
5764         bnx2_init_nvram(bp);
5765
5766         reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5767
5768         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5769             BNX2_SHM_HDR_SIGNATURE_SIG)
5770                 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5771         else
5772                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5773
5774         /* Get the permanent MAC address.  First we need to make sure the
5775          * firmware is actually running.
5776          */
5777         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5778
5779         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5780             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5781                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5782                 rc = -ENODEV;
5783                 goto err_out_unmap;
5784         }
5785
5786         bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5787
5788         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5789         bp->mac_addr[0] = (u8) (reg >> 8);
5790         bp->mac_addr[1] = (u8) reg;
5791
5792         reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5793         bp->mac_addr[2] = (u8) (reg >> 24);
5794         bp->mac_addr[3] = (u8) (reg >> 16);
5795         bp->mac_addr[4] = (u8) (reg >> 8);
5796         bp->mac_addr[5] = (u8) reg;
5797
5798         bp->tx_ring_size = MAX_TX_DESC_CNT;
5799         bnx2_set_rx_ring_size(bp, 255);
5800
5801         bp->rx_csum = 1;
5802
5803         bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5804
5805         bp->tx_quick_cons_trip_int = 20;
5806         bp->tx_quick_cons_trip = 20;
5807         bp->tx_ticks_int = 80;
5808         bp->tx_ticks = 80;
5809
5810         bp->rx_quick_cons_trip_int = 6;
5811         bp->rx_quick_cons_trip = 6;
5812         bp->rx_ticks_int = 18;
5813         bp->rx_ticks = 18;
5814
5815         bp->stats_ticks = 1000000 & 0xffff00;
5816
5817         bp->timer_interval =  HZ;
5818         bp->current_interval =  HZ;
5819
5820         bp->phy_addr = 1;
5821
5822         /* Disable WOL support if we are running on a SERDES chip. */
5823         if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5824                 bp->phy_flags |= PHY_SERDES_FLAG;
5825                 bp->flags |= NO_WOL_FLAG;
5826                 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5827                         bp->phy_addr = 2;
5828                         reg = REG_RD_IND(bp, bp->shmem_base +
5829                                          BNX2_SHARED_HW_CFG_CONFIG);
5830                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5831                                 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5832                 }
5833         }
5834
5835         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5836             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5837             (CHIP_ID(bp) == CHIP_ID_5708_B1))
5838                 bp->flags |= NO_WOL_FLAG;
5839
5840         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5841                 bp->tx_quick_cons_trip_int =
5842                         bp->tx_quick_cons_trip;
5843                 bp->tx_ticks_int = bp->tx_ticks;
5844                 bp->rx_quick_cons_trip_int =
5845                         bp->rx_quick_cons_trip;
5846                 bp->rx_ticks_int = bp->rx_ticks;
5847                 bp->comp_prod_trip_int = bp->comp_prod_trip;
5848                 bp->com_ticks_int = bp->com_ticks;
5849                 bp->cmd_ticks_int = bp->cmd_ticks;
5850         }
5851
5852         /* Disable MSI on 5706 if AMD 8132 bridge is found.
5853          *
5854          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
5855          * with byte enables disabled on the unused 32-bit word.  This is legal
5856          * but causes problems on the AMD 8132 which will eventually stop
5857          * responding after a while.
5858          *
5859          * AMD believes this incompatibility is unique to the 5706, and
5860          * prefers to locally disable MSI rather than globally disabling it
5861          * using pci_msi_quirk.
5862          */
5863         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5864                 struct pci_dev *amd_8132 = NULL;
5865
5866                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5867                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
5868                                                   amd_8132))) {
5869                         u8 rev;
5870
5871                         pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5872                         if (rev >= 0x10 && rev <= 0x13) {
5873                                 disable_msi = 1;
5874                                 pci_dev_put(amd_8132);
5875                                 break;
5876                         }
5877                 }
5878         }
5879
5880         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5881         bp->req_line_speed = 0;
5882         if (bp->phy_flags & PHY_SERDES_FLAG) {
5883                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5884
5885                 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5886                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5887                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5888                         bp->autoneg = 0;
5889                         bp->req_line_speed = bp->line_speed = SPEED_1000;
5890                         bp->req_duplex = DUPLEX_FULL;
5891                 }
5892         }
5893         else {
5894                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5895         }
5896
5897         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5898
5899         init_timer(&bp->timer);
5900         bp->timer.expires = RUN_AT(bp->timer_interval);
5901         bp->timer.data = (unsigned long) bp;
5902         bp->timer.function = bnx2_timer;
5903
5904         return 0;
5905
5906 err_out_unmap:
5907         if (bp->regview) {
5908                 iounmap(bp->regview);
5909                 bp->regview = NULL;
5910         }
5911
5912 err_out_release:
5913         pci_release_regions(pdev);
5914
5915 err_out_disable:
5916         pci_disable_device(pdev);
5917         pci_set_drvdata(pdev, NULL);
5918
5919 err_out:
5920         return rc;
5921 }
5922
5923 static int __devinit
5924 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5925 {
5926         static int version_printed = 0;
5927         struct net_device *dev = NULL;
5928         struct bnx2 *bp;
5929         int rc, i;
5930
5931         if (version_printed++ == 0)
5932                 printk(KERN_INFO "%s", version);
5933
5934         /* dev zeroed in init_etherdev */
5935         dev = alloc_etherdev(sizeof(*bp));
5936
5937         if (!dev)
5938                 return -ENOMEM;
5939
5940         rc = bnx2_init_board(pdev, dev);
5941         if (rc < 0) {
5942                 free_netdev(dev);
5943                 return rc;
5944         }
5945
5946         dev->open = bnx2_open;
5947         dev->hard_start_xmit = bnx2_start_xmit;
5948         dev->stop = bnx2_close;
5949         dev->get_stats = bnx2_get_stats;
5950         dev->set_multicast_list = bnx2_set_rx_mode;
5951         dev->do_ioctl = bnx2_ioctl;
5952         dev->set_mac_address = bnx2_change_mac_addr;
5953         dev->change_mtu = bnx2_change_mtu;
5954         dev->tx_timeout = bnx2_tx_timeout;
5955         dev->watchdog_timeo = TX_TIMEOUT;
5956 #ifdef BCM_VLAN
5957         dev->vlan_rx_register = bnx2_vlan_rx_register;
5958         dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5959 #endif
5960         dev->poll = bnx2_poll;
5961         dev->ethtool_ops = &bnx2_ethtool_ops;
5962         dev->weight = 64;
5963
5964         bp = netdev_priv(dev);
5965
5966 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5967         dev->poll_controller = poll_bnx2;
5968 #endif
5969
5970         if ((rc = register_netdev(dev))) {
5971                 dev_err(&pdev->dev, "Cannot register net device\n");
5972                 if (bp->regview)
5973                         iounmap(bp->regview);
5974                 pci_release_regions(pdev);
5975                 pci_disable_device(pdev);
5976                 pci_set_drvdata(pdev, NULL);
5977                 free_netdev(dev);
5978                 return rc;
5979         }
5980
5981         pci_set_drvdata(pdev, dev);
5982
5983         memcpy(dev->dev_addr, bp->mac_addr, 6);
5984         memcpy(dev->perm_addr, bp->mac_addr, 6);
5985         bp->name = board_info[ent->driver_data].name,
5986         printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5987                 "IRQ %d, ",
5988                 dev->name,
5989                 bp->name,
5990                 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5991                 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5992                 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5993                 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5994                 bp->bus_speed_mhz,
5995                 dev->base_addr,
5996                 bp->pdev->irq);
5997
5998         printk("node addr ");
5999         for (i = 0; i < 6; i++)
6000                 printk("%2.2x", dev->dev_addr[i]);
6001         printk("\n");
6002
6003         dev->features |= NETIF_F_SG;
6004         if (bp->flags & USING_DAC_FLAG)
6005                 dev->features |= NETIF_F_HIGHDMA;
6006         dev->features |= NETIF_F_IP_CSUM;
6007 #ifdef BCM_VLAN
6008         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6009 #endif
6010 #ifdef BCM_TSO
6011         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6012 #endif
6013
6014         netif_carrier_off(bp->dev);
6015
6016         return 0;
6017 }
6018
6019 static void __devexit
6020 bnx2_remove_one(struct pci_dev *pdev)
6021 {
6022         struct net_device *dev = pci_get_drvdata(pdev);
6023         struct bnx2 *bp = netdev_priv(dev);
6024
6025         flush_scheduled_work();
6026
6027         unregister_netdev(dev);
6028
6029         if (bp->regview)
6030                 iounmap(bp->regview);
6031
6032         free_netdev(dev);
6033         pci_release_regions(pdev);
6034         pci_disable_device(pdev);
6035         pci_set_drvdata(pdev, NULL);
6036 }
6037
6038 static int
6039 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6040 {
6041         struct net_device *dev = pci_get_drvdata(pdev);
6042         struct bnx2 *bp = netdev_priv(dev);
6043         u32 reset_code;
6044
6045         if (!netif_running(dev))
6046                 return 0;
6047
6048         flush_scheduled_work();
6049         bnx2_netif_stop(bp);
6050         netif_device_detach(dev);
6051         del_timer_sync(&bp->timer);
6052         if (bp->flags & NO_WOL_FLAG)
6053                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6054         else if (bp->wol)
6055                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6056         else
6057                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6058         bnx2_reset_chip(bp, reset_code);
6059         bnx2_free_skbs(bp);
6060         bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6061         return 0;
6062 }
6063
6064 static int
6065 bnx2_resume(struct pci_dev *pdev)
6066 {
6067         struct net_device *dev = pci_get_drvdata(pdev);
6068         struct bnx2 *bp = netdev_priv(dev);
6069
6070         if (!netif_running(dev))
6071                 return 0;
6072
6073         bnx2_set_power_state(bp, PCI_D0);
6074         netif_device_attach(dev);
6075         bnx2_init_nic(bp);
6076         bnx2_netif_start(bp);
6077         return 0;
6078 }
6079
/* PCI driver glue: probe/remove and power-management entry points,
 * matched against the device IDs in bnx2_pci_tbl. */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
};
6088
6089 static int __init bnx2_init(void)
6090 {
6091         return pci_register_driver(&bnx2_pci_driver);
6092 }
6093
/* Module exit point: unregistering the driver triggers
 * bnx2_remove_one() for every bound device. */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
6098
/* Register the module load/unload handlers with the kernel. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
6101
6102
6103