1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
 * Written by: Michael Chan (mchan@broadcom.com)
 */
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#define BCM_VLAN 1
#endif
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.45"
60 #define DRV_MODULE_RELDATE "September 29, 2006"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* indexed by board_t, above */
static const struct {
	char *name;
} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ 0, }
};
static struct flash_spec flash_table[] =
{
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-buffered)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-buffered)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	/* Atmel Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
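/* Number of free tx BDs.  tx_prod and tx_cons are free-running
 * counters; the (diff & MAX_TX_DESC_CNT) - 1 adjustment handles
 * wrap-around and the per-page "next page" chain BD that never
 * carries data.
 */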
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
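/* Indirect register access: the target offset is first written to
 * the PCICFG window address register, then the data is read or
 * written through the window data register.
 */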
222 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
229 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
239 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240 REG_WR(bp, BNX2_CTX_DATA, val);
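/* Read a PHY register through the EMAC MDIO interface.  If hardware
 * auto-polling is active, it is suspended for the duration of the
 * access; the command register is then polled (up to 50 iterations)
 * until the START_BUSY bit clears.
 */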
244 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
249 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
253 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
259 val1 = (bp->phy_addr << 21) | (reg << 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261 BNX2_EMAC_MDIO_COMM_START_BUSY;
262 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
264 for (i = 0; i < 50; i++) {
267 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
271 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
278 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
287 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
291 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
306 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
310 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
316 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
321 for (i = 0; i < 50; i++) {
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
331 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
336 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
350 bnx2_disable_int(struct bnx2 *bp)
352 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
358 bnx2_enable_int(struct bnx2 *bp)
360 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
367 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
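/* Disable interrupts and wait for any handler running on another CPU
 * to complete.  Bumping intr_sem makes the ISR return early until
 * bnx2_netif_start() drops the count back to zero.
 */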
371 bnx2_disable_int_sync(struct bnx2 *bp)
373 atomic_inc(&bp->intr_sem);
374 bnx2_disable_int(bp);
375 synchronize_irq(bp->pdev->irq);
379 bnx2_netif_stop(struct bnx2 *bp)
381 bnx2_disable_int_sync(bp);
382 if (netif_running(bp->dev)) {
383 netif_poll_disable(bp->dev);
384 netif_tx_disable(bp->dev);
385 bp->dev->trans_start = jiffies; /* prevent tx timeout */
390 bnx2_netif_start(struct bnx2 *bp)
392 if (atomic_dec_and_test(&bp->intr_sem)) {
393 if (netif_running(bp->dev)) {
394 netif_wake_queue(bp->dev);
395 netif_poll_enable(bp->dev);
402 bnx2_free_mem(struct bnx2 *bp)
406 if (bp->status_blk) {
407 pci_free_consistent(bp->pdev, bp->status_stats_size,
408 bp->status_blk, bp->status_blk_mapping);
409 bp->status_blk = NULL;
410 bp->stats_blk = NULL;
412 if (bp->tx_desc_ring) {
413 pci_free_consistent(bp->pdev,
414 sizeof(struct tx_bd) * TX_DESC_CNT,
415 bp->tx_desc_ring, bp->tx_desc_mapping);
416 bp->tx_desc_ring = NULL;
418 kfree(bp->tx_buf_ring);
419 bp->tx_buf_ring = NULL;
420 for (i = 0; i < bp->rx_max_ring; i++) {
421 if (bp->rx_desc_ring[i])
422 pci_free_consistent(bp->pdev,
423 sizeof(struct rx_bd) * RX_DESC_CNT,
425 bp->rx_desc_mapping[i]);
426 bp->rx_desc_ring[i] = NULL;
428 vfree(bp->rx_buf_ring);
429 bp->rx_buf_ring = NULL;
433 bnx2_alloc_mem(struct bnx2 *bp)
435 int i, status_blk_size;
437 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
439 if (bp->tx_buf_ring == NULL)
442 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443 sizeof(struct tx_bd) *
445 &bp->tx_desc_mapping);
446 if (bp->tx_desc_ring == NULL)
449 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
451 if (bp->rx_buf_ring == NULL)
454 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
457 for (i = 0; i < bp->rx_max_ring; i++) {
458 bp->rx_desc_ring[i] =
459 pci_alloc_consistent(bp->pdev,
460 sizeof(struct rx_bd) * RX_DESC_CNT,
461 &bp->rx_desc_mapping[i]);
462 if (bp->rx_desc_ring[i] == NULL)
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469 bp->status_stats_size = status_blk_size +
470 sizeof(struct statistics_block);
472 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
473 &bp->status_blk_mapping);
474 if (bp->status_blk == NULL)
477 memset(bp->status_blk, 0, bp->status_stats_size);
479 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
482 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
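/* Mirror the resolved link state (speed, duplex, autoneg status)
 * into the shared-memory link status word so the on-chip firmware
 * sees the same link state as the driver.
 */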
492 bnx2_report_fw_link(struct bnx2 *bp)
494 u32 fw_link_status = 0;
499 switch (bp->line_speed) {
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
548 bnx2_report_link(struct bnx2 *bp)
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
554 printk("%d Mbps ", bp->line_speed);
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
559 printk("half duplex");
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
568 printk(", transmit ");
570 printk("flow control ON");
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
579 bnx2_report_fw_link(bp);
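/* Resolve tx/rx pause from the local and remote advertisements per
 * Table 28B-3 of 802.3ab.  For SerDes PHYs the 1000X pause bits are
 * first translated to the copper-style PAUSE_CAP/PAUSE_ASYM encoding;
 * the 5708 SerDes reports the resolution directly in a status register.
 */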
583 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
585 u32 local_adv, remote_adv;
588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
597 if (bp->duplex != DUPLEX_FULL) {
601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
653 bp->flow_ctrl = FLOW_CTRL_TX;
659 bnx2_5708s_linkup(struct bnx2 *bp)
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
682 bp->duplex = DUPLEX_HALF;
688 bnx2_5706s_linkup(struct bnx2 *bp)
690 u32 bmcr, local_adv, remote_adv, common;
693 bp->line_speed = SPEED_1000;
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
700 bp->duplex = DUPLEX_HALF;
703 if (!(bmcr & BMCR_ANENABLE)) {
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
717 bp->duplex = DUPLEX_HALF;
725 bnx2_copper_linkup(struct bnx2 *bp)
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
777 bp->line_speed = SPEED_10;
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
783 bp->duplex = DUPLEX_HALF;
791 bnx2_set_mac_link(struct bnx2 *bp)
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
809 switch (bp->line_speed) {
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
817 val |= BNX2_EMAC_MODE_PORT_MII;
820 val |= BNX2_EMAC_MODE_25G;
823 val |= BNX2_EMAC_MODE_PORT_GMII;
828 val |= BNX2_EMAC_MODE_PORT_GMII;
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
858 bnx2_set_link(struct bnx2 *bp)
863 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
868 link_up = bp->link_up;
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
881 bmsr &= ~BMSR_LSTATUS;
884 if (bmsr & BMSR_LSTATUS) {
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
894 bnx2_copper_linkup(bp);
896 bnx2_resolve_flow_ctrl(bp);
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
905 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
906 if (!(bmcr & BMCR_ANENABLE)) {
907 bnx2_write_phy(bp, MII_BMCR, bmcr |
911 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
915 if (bp->link_up != link_up) {
916 bnx2_report_link(bp);
919 bnx2_set_mac_link(bp);
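/* Soft-reset the PHY and poll for the self-clearing BMCR_RESET bit. */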
925 bnx2_reset_phy(struct bnx2 *bp)
930 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
932 #define PHY_RESET_MAX_WAIT 100
933 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		bnx2_read_phy(bp, MII_BMCR, &reg);
937 if (!(reg & BMCR_RESET)) {
942 if (i == PHY_RESET_MAX_WAIT) {
949 bnx2_phy_get_pause_adv(struct bnx2 *bp)
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
960 adv = ADVERTISE_PAUSE_CAP;
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
968 adv = ADVERTISE_PAUSE_ASYM;
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
983 bnx2_setup_serdes_phy(struct bnx2 *bp)
988 if (!(bp->autoneg & AUTONEG_SPEED)) {
990 int force_link_down = 0;
992 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
993 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
995 bnx2_read_phy(bp, MII_BMCR, &bmcr);
996 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
997 new_bmcr |= BMCR_SPEED1000;
998 if (bp->req_line_speed == SPEED_2500) {
999 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1000 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1001 if (!(up1 & BCM5708S_UP1_2G5)) {
1002 up1 |= BCM5708S_UP1_2G5;
1003 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1004 force_link_down = 1;
1006 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1007 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1008 if (up1 & BCM5708S_UP1_2G5) {
1009 up1 &= ~BCM5708S_UP1_2G5;
1010 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1011 force_link_down = 1;
1015 if (bp->req_duplex == DUPLEX_FULL) {
1016 adv |= ADVERTISE_1000XFULL;
1017 new_bmcr |= BMCR_FULLDPLX;
1020 adv |= ADVERTISE_1000XHALF;
1021 new_bmcr &= ~BMCR_FULLDPLX;
1023 if ((new_bmcr != bmcr) || (force_link_down)) {
1024 /* Force a link down visible on the other side */
1026 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1027 ~(ADVERTISE_1000XFULL |
1028 ADVERTISE_1000XHALF));
1029 bnx2_write_phy(bp, MII_BMCR, bmcr |
1030 BMCR_ANRESTART | BMCR_ANENABLE);
1033 netif_carrier_off(bp->dev);
1034 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1035 bnx2_report_link(bp);
1037 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1038 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1043 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1044 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1045 up1 |= BCM5708S_UP1_2G5;
1046 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1049 if (bp->advertising & ADVERTISED_1000baseT_Full)
1050 new_adv |= ADVERTISE_1000XFULL;
1052 new_adv |= bnx2_phy_get_pause_adv(bp);
1054 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1055 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1057 bp->serdes_an_pending = 0;
1058 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1059 /* Force a link down visible on the other side */
1061 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1062 spin_unlock_bh(&bp->phy_lock);
1064 spin_lock_bh(&bp->phy_lock);
1067 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1068 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1070 /* Speed up link-up time when the link partner
1071 * does not autonegotiate which is very common
1072 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
1074 * to minimize link disruptions. Autoneg. involves
1075 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
1078 bp->current_interval = SERDES_AN_TIMEOUT;
1079 bp->serdes_an_pending = 1;
1080 mod_timer(&bp->timer, jiffies + bp->current_interval);
1086 #define ETHTOOL_ALL_FIBRE_SPEED \
1087 (ADVERTISED_1000baseT_Full)
1089 #define ETHTOOL_ALL_COPPER_SPEED \
1090 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1091 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1092 ADVERTISED_1000baseT_Full)
1094 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1095 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1097 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1100 bnx2_setup_copper_phy(struct bnx2 *bp)
1105 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1107 if (bp->autoneg & AUTONEG_SPEED) {
1108 u32 adv_reg, adv1000_reg;
1109 u32 new_adv_reg = 0;
1110 u32 new_adv1000_reg = 0;
1112 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1113 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1114 ADVERTISE_PAUSE_ASYM);
1116 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1117 adv1000_reg &= PHY_ALL_1000_SPEED;
1119 if (bp->advertising & ADVERTISED_10baseT_Half)
1120 new_adv_reg |= ADVERTISE_10HALF;
1121 if (bp->advertising & ADVERTISED_10baseT_Full)
1122 new_adv_reg |= ADVERTISE_10FULL;
1123 if (bp->advertising & ADVERTISED_100baseT_Half)
1124 new_adv_reg |= ADVERTISE_100HALF;
1125 if (bp->advertising & ADVERTISED_100baseT_Full)
1126 new_adv_reg |= ADVERTISE_100FULL;
1127 if (bp->advertising & ADVERTISED_1000baseT_Full)
1128 new_adv1000_reg |= ADVERTISE_1000FULL;
1130 new_adv_reg |= ADVERTISE_CSMA;
1132 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1134 if ((adv1000_reg != new_adv1000_reg) ||
1135 (adv_reg != new_adv_reg) ||
1136 ((bmcr & BMCR_ANENABLE) == 0)) {
1138 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1139 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1140 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1143 else if (bp->link_up) {
1144 /* Flow ctrl may have changed from auto to forced */
1145 /* or vice-versa. */
1147 bnx2_resolve_flow_ctrl(bp);
1148 bnx2_set_mac_link(bp);
1154 if (bp->req_line_speed == SPEED_100) {
1155 new_bmcr |= BMCR_SPEED100;
1157 if (bp->req_duplex == DUPLEX_FULL) {
1158 new_bmcr |= BMCR_FULLDPLX;
1160 if (new_bmcr != bmcr) {
1164 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1165 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1167 if (bmsr & BMSR_LSTATUS) {
1168 /* Force link down */
1169 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1172 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1173 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1175 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1178 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		/* Normally, the new speed is set up after the link has
		 * gone down and up again.  In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
1184 if (bmsr & BMSR_LSTATUS) {
1185 bp->line_speed = bp->req_line_speed;
1186 bp->duplex = bp->req_duplex;
1187 bnx2_resolve_flow_ctrl(bp);
1188 bnx2_set_mac_link(bp);
1195 bnx2_setup_phy(struct bnx2 *bp)
1197 if (bp->loopback == MAC_LOOPBACK)
1200 if (bp->phy_flags & PHY_SERDES_FLAG) {
1201 return (bnx2_setup_serdes_phy(bp));
1204 return (bnx2_setup_copper_phy(bp));
1209 bnx2_init_5708s_phy(struct bnx2 *bp)
1213 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1214 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1215 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1217 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1218 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1219 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1221 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1222 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1223 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1225 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1226 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1227 val |= BCM5708S_UP1_2G5;
1228 bnx2_write_phy(bp, BCM5708S_UP1, val);
1231 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1232 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1233 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1234 /* increase tx signal amplitude */
1235 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1236 BCM5708S_BLK_ADDR_TX_MISC);
1237 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1238 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1239 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1240 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1243 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1244 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1249 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1250 BNX2_SHARED_HW_CFG_CONFIG);
1251 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1252 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1253 BCM5708S_BLK_ADDR_TX_MISC);
1254 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1255 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1256 BCM5708S_BLK_ADDR_DIG);
1263 bnx2_init_5706s_phy(struct bnx2 *bp)
1265 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1267 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1268 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1271 if (bp->dev->mtu > 1500) {
1274 /* Set extended packet length bit */
1275 bnx2_write_phy(bp, 0x18, 0x7);
1276 bnx2_read_phy(bp, 0x18, &val);
1277 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1279 bnx2_write_phy(bp, 0x1c, 0x6c00);
1280 bnx2_read_phy(bp, 0x1c, &val);
1281 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1286 bnx2_write_phy(bp, 0x18, 0x7);
1287 bnx2_read_phy(bp, 0x18, &val);
1288 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1290 bnx2_write_phy(bp, 0x1c, 0x6c00);
1291 bnx2_read_phy(bp, 0x1c, &val);
1292 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1299 bnx2_init_copper_phy(struct bnx2 *bp)
1303 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1305 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1306 bnx2_write_phy(bp, 0x18, 0x0c00);
1307 bnx2_write_phy(bp, 0x17, 0x000a);
1308 bnx2_write_phy(bp, 0x15, 0x310b);
1309 bnx2_write_phy(bp, 0x17, 0x201f);
1310 bnx2_write_phy(bp, 0x15, 0x9506);
1311 bnx2_write_phy(bp, 0x17, 0x401f);
1312 bnx2_write_phy(bp, 0x15, 0x14e2);
1313 bnx2_write_phy(bp, 0x18, 0x0400);
1316 if (bp->dev->mtu > 1500) {
1317 /* Set extended packet length bit */
1318 bnx2_write_phy(bp, 0x18, 0x7);
1319 bnx2_read_phy(bp, 0x18, &val);
1320 bnx2_write_phy(bp, 0x18, val | 0x4000);
1322 bnx2_read_phy(bp, 0x10, &val);
1323 bnx2_write_phy(bp, 0x10, val | 0x1);
1326 bnx2_write_phy(bp, 0x18, 0x7);
1327 bnx2_read_phy(bp, 0x18, &val);
1328 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1330 bnx2_read_phy(bp, 0x10, &val);
1331 bnx2_write_phy(bp, 0x10, val & ~0x1);
1334 /* ethernet@wirespeed */
1335 bnx2_write_phy(bp, 0x18, 0x7007);
1336 bnx2_read_phy(bp, 0x18, &val);
1337 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1343 bnx2_init_phy(struct bnx2 *bp)
1348 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1349 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1351 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1355 bnx2_read_phy(bp, MII_PHYSID1, &val);
1356 bp->phy_id = val << 16;
1357 bnx2_read_phy(bp, MII_PHYSID2, &val);
1358 bp->phy_id |= val & 0xffff;
1360 if (bp->phy_flags & PHY_SERDES_FLAG) {
1361 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1362 rc = bnx2_init_5706s_phy(bp);
1363 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1364 rc = bnx2_init_5708s_phy(bp);
1367 rc = bnx2_init_copper_phy(bp);
1376 bnx2_set_mac_loopback(struct bnx2 *bp)
1380 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1381 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1382 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1383 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1388 static int bnx2_test_link(struct bnx2 *);
1391 bnx2_set_phy_loopback(struct bnx2 *bp)
1396 spin_lock_bh(&bp->phy_lock);
1397 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1399 spin_unlock_bh(&bp->phy_lock);
1403 for (i = 0; i < 10; i++) {
1404 if (bnx2_test_link(bp) == 0)
1409 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1410 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1411 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1412 BNX2_EMAC_MODE_25G);
1414 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1415 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1421 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1427 msg_data |= bp->fw_wr_seq;
1429 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1431 /* wait for an acknowledgement. */
1432 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1435 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1437 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1440 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1443 /* If we timed out, inform the firmware that this is the case. */
1444 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1446 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1449 msg_data &= ~BNX2_DRV_MSG_CODE;
1450 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1452 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1457 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1464 bnx2_init_context(struct bnx2 *bp)
1470 u32 vcid_addr, pcid_addr, offset;
1474 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1477 vcid_addr = GET_PCID_ADDR(vcid);
1479 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1484 pcid_addr = GET_PCID_ADDR(new_vcid);
1487 vcid_addr = GET_CID_ADDR(vcid);
1488 pcid_addr = vcid_addr;
1491 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1492 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1494 /* Zero out the context. */
1495 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1496 CTX_WR(bp, 0x00, offset, 0);
1499 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1500 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
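/* 5706 A0 workaround: ask the chip to hand out every free rx mbuf
 * cluster, record the good ones, and free only those back so that
 * clusters flagged bad (bit 9 set) stay out of circulation.
 */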
1505 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1511 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1512 if (good_mbuf == NULL) {
1513 printk(KERN_ERR PFX "Failed to allocate memory in "
1514 "bnx2_alloc_bad_rbuf\n");
1518 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1519 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1523 /* Allocate a bunch of mbufs and save the good ones in an array. */
1524 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1525 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1526 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1528 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1530 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1532 /* The addresses with Bit 9 set are bad memory blocks. */
1533 if (!(val & (1 << 9))) {
1534 good_mbuf[good_mbuf_cnt] = (u16) val;
1538 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1541 /* Free the good ones back to the mbuf pool thus discarding
1542 * all the bad ones. */
1543 while (good_mbuf_cnt) {
1546 val = good_mbuf[good_mbuf_cnt];
1547 val = (val << 9) | val | 1;
1549 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
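/* Program the unicast MAC address into the EMAC match registers,
 * split as the two high bytes (MATCH0) and four low bytes (MATCH1).
 */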
1556 bnx2_set_mac_addr(struct bnx2 *bp)
1559 u8 *mac_addr = bp->dev->dev_addr;
1561 val = (mac_addr[0] << 8) | mac_addr[1];
1563 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1565 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1566 (mac_addr[4] << 8) | mac_addr[5];
1568 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1572 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1574 struct sk_buff *skb;
1575 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1577 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1578 unsigned long align;
1580 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1585 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1586 skb_reserve(skb, 8 - align);
1589 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1590 PCI_DMA_FROMDEVICE);
1593 pci_unmap_addr_set(rx_buf, mapping, mapping);
1595 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1596 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1598 bp->rx_prod_bseq += bp->rx_buf_use_size;
1604 bnx2_phy_int(struct bnx2 *bp)
1606 u32 new_link_state, old_link_state;
1608 new_link_state = bp->status_blk->status_attn_bits &
1609 STATUS_ATTN_BITS_LINK_STATE;
1610 old_link_state = bp->status_blk->status_attn_bits_ack &
1611 STATUS_ATTN_BITS_LINK_STATE;
1612 if (new_link_state != old_link_state) {
1613 if (new_link_state) {
1614 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1615 STATUS_ATTN_BITS_LINK_STATE);
1618 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1619 STATUS_ATTN_BITS_LINK_STATE);
1626 bnx2_tx_int(struct bnx2 *bp)
1628 struct status_block *sblk = bp->status_blk;
1629 u16 hw_cons, sw_cons, sw_ring_cons;
1632 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1633 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1636 sw_cons = bp->tx_cons;
1638 while (sw_cons != hw_cons) {
1639 struct sw_bd *tx_buf;
1640 struct sk_buff *skb;
1643 sw_ring_cons = TX_RING_IDX(sw_cons);
1645 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1648 /* partial BD completions possible with TSO packets */
1649 if (skb_is_gso(skb)) {
1650 u16 last_idx, last_ring_idx;
1652 last_idx = sw_cons +
1653 skb_shinfo(skb)->nr_frags + 1;
1654 last_ring_idx = sw_ring_cons +
1655 skb_shinfo(skb)->nr_frags + 1;
1656 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1659 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1664 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1665 skb_headlen(skb), PCI_DMA_TODEVICE);
1668 last = skb_shinfo(skb)->nr_frags;
1670 for (i = 0; i < last; i++) {
1671 sw_cons = NEXT_TX_BD(sw_cons);
1673 pci_unmap_page(bp->pdev,
1675 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1677 skb_shinfo(skb)->frags[i].size,
1681 sw_cons = NEXT_TX_BD(sw_cons);
1683 tx_free_bd += last + 1;
1687 hw_cons = bp->hw_tx_cons =
1688 sblk->status_tx_quick_consumer_index0;
1690 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1695 bp->tx_cons = sw_cons;
1696 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1697 * before checking for netif_queue_stopped(). Without the
1698 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();
1703 if (unlikely(netif_queue_stopped(bp->dev)) &&
1704 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1705 netif_tx_lock(bp->dev);
1706 if ((netif_queue_stopped(bp->dev)) &&
1707 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1708 netif_wake_queue(bp->dev);
1709 netif_tx_unlock(bp->dev);
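/* Recycle an rx buffer: move the skb and its DMA mapping from the
 * consumer slot to the current producer slot so the hardware can
 * reuse the buffer without a new allocation or mapping.
 */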
1714 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1717 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1718 struct rx_bd *cons_bd, *prod_bd;
1720 cons_rx_buf = &bp->rx_buf_ring[cons];
1721 prod_rx_buf = &bp->rx_buf_ring[prod];
1723 pci_dma_sync_single_for_device(bp->pdev,
1724 pci_unmap_addr(cons_rx_buf, mapping),
1725 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1727 bp->rx_prod_bseq += bp->rx_buf_use_size;
1729 prod_rx_buf->skb = skb;
1734 pci_unmap_addr_set(prod_rx_buf, mapping,
1735 pci_unmap_addr(cons_rx_buf, mapping));
1737 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1738 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1739 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1740 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
1744 bnx2_rx_int(struct bnx2 *bp, int budget)
1746 struct status_block *sblk = bp->status_blk;
1747 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1748 struct l2_fhdr *rx_hdr;
1751 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1752 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1755 sw_cons = bp->rx_cons;
1756 sw_prod = bp->rx_prod;
1758 /* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
1762 while (sw_cons != hw_cons) {
1765 struct sw_bd *rx_buf;
1766 struct sk_buff *skb;
1767 dma_addr_t dma_addr;
1769 sw_ring_cons = RX_RING_IDX(sw_cons);
1770 sw_ring_prod = RX_RING_IDX(sw_prod);
1772 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1777 dma_addr = pci_unmap_addr(rx_buf, mapping);
1779 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1780 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1782 rx_hdr = (struct l2_fhdr *) skb->data;
1783 len = rx_hdr->l2_fhdr_pkt_len - 4;
1785 if ((status = rx_hdr->l2_fhdr_status) &
1786 (L2_FHDR_ERRORS_BAD_CRC |
1787 L2_FHDR_ERRORS_PHY_DECODE |
1788 L2_FHDR_ERRORS_ALIGNMENT |
1789 L2_FHDR_ERRORS_TOO_SHORT |
1790 L2_FHDR_ERRORS_GIANT_FRAME)) {
		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
1798 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1799 struct sk_buff *new_skb;
1801 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1802 if (new_skb == NULL)
1806 memcpy(new_skb->data,
1807 skb->data + bp->rx_offset - 2,
1810 skb_reserve(new_skb, 2);
1811 skb_put(new_skb, len);
1813 bnx2_reuse_rx_skb(bp, skb,
1814 sw_ring_cons, sw_ring_prod);
1818 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1819 pci_unmap_single(bp->pdev, dma_addr,
1820 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1822 skb_reserve(skb, bp->rx_offset);
1827 bnx2_reuse_rx_skb(bp, skb,
1828 sw_ring_cons, sw_ring_prod);
1832 skb->protocol = eth_type_trans(skb, bp->dev);
1834 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1835 (ntohs(skb->protocol) != 0x8100)) {
1842 skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
1845 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1847 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1848 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1849 skb->ip_summed = CHECKSUM_UNNECESSARY;
#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);
1861 bp->dev->last_rx = jiffies;
1865 sw_cons = NEXT_RX_BD(sw_cons);
1866 sw_prod = NEXT_RX_BD(sw_prod);
		if (rx_pkt == budget)
1871 /* Refresh hw_cons to see if there is new work */
1872 if (sw_cons == hw_cons) {
1873 hw_cons = bp->hw_rx_cons =
1874 sblk->status_rx_quick_consumer_index0;
1875 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1880 bp->rx_cons = sw_cons;
1881 bp->rx_prod = sw_prod;
1883 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1885 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1893 /* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
1897 bnx2_msi(int irq, void *dev_instance)
1899 struct net_device *dev = dev_instance;
1900 struct bnx2 *bp = netdev_priv(dev);
1902 prefetch(bp->status_blk);
1903 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1904 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1905 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1907 /* Return here if interrupt is disabled. */
1908 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1911 netif_rx_schedule(dev);
1917 bnx2_interrupt(int irq, void *dev_instance)
1919 struct net_device *dev = dev_instance;
1920 struct bnx2 *bp = netdev_priv(dev);
1922 /* When using INTx, it is possible for the interrupt to arrive
1923 * at the CPU before the status block posted prior to the
1924 * interrupt. Reading a register will flush the status block.
1925 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
1928 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1929 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1930 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1933 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1934 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1935 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1937 /* Return here if interrupt is shared and is disabled. */
1938 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1941 netif_rx_schedule(dev);
1947 bnx2_has_work(struct bnx2 *bp)
1949 struct status_block *sblk = bp->status_blk;
1951 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1952 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
	if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
	    ((sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE) != 0))
		return 1;
1963 bnx2_poll(struct net_device *dev, int *budget)
1965 struct bnx2 *bp = netdev_priv(dev);
1967 if ((bp->status_blk->status_attn_bits &
1968 STATUS_ATTN_BITS_LINK_STATE) !=
1969 (bp->status_blk->status_attn_bits_ack &
1970 STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_phy_int(bp);
1974 spin_unlock(&bp->phy_lock);
1976 /* This is needed to take care of transient status
		 * during link changes.
		 */
1979 REG_WR(bp, BNX2_HC_COMMAND,
1980 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1981 REG_RD(bp, BNX2_HC_COMMAND);
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);
1987 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1988 int orig_budget = *budget;
1991 if (orig_budget > dev->quota)
1992 orig_budget = dev->quota;
1994 work_done = bnx2_rx_int(bp, orig_budget);
1995 *budget -= work_done;
1996 dev->quota -= work_done;
1999 bp->last_status_idx = bp->status_blk->status_idx;
2002 if (!bnx2_has_work(bp)) {
2003 netif_rx_complete(dev);
2004 if (likely(bp->flags & USING_MSI_FLAG)) {
2005 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2006 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2007 bp->last_status_idx);
2010 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2011 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2012 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2013 bp->last_status_idx);
2015 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2016 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2017 bp->last_status_idx);
2024 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
2028 bnx2_set_rx_mode(struct net_device *dev)
2030 struct bnx2 *bp = netdev_priv(dev);
2031 u32 rx_mode, sort_mode;
2034 spin_lock_bh(&bp->phy_lock);
2036 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2037 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2038 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
2046 if (dev->flags & IFF_PROMISC) {
2047 /* Promiscuous mode. */
2048 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2049 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2050 BNX2_RPM_SORT_USER0_PROM_VLAN;
2052 else if (dev->flags & IFF_ALLMULTI) {
2053 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2054 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2057 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2060 /* Accept one or more multicast(s). */
2061 struct dev_mc_list *mclist;
2062 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2067 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
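		/* Hash each address into one bit of a 256-bit filter: the
		 * low CRC byte selects the bit; its top three bits pick one
		 * of eight 32-bit hash registers and the low five bits the
		 * position within that register.
		 */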
2069 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2070 i++, mclist = mclist->next) {
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
2079 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2080 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2084 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2087 if (rx_mode != bp->rx_mode) {
2088 bp->rx_mode = rx_mode;
2089 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2092 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2093 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2094 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2096 spin_unlock_bh(&bp->phy_lock);
2099 #define FW_BUF_SIZE 0x8000
2102 bnx2_gunzip_init(struct bnx2 *bp)
2104 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2107 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2110 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2111 if (bp->strm->workspace == NULL)
2121 vfree(bp->gunzip_buf);
2122 bp->gunzip_buf = NULL;
2125 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2126 "uncompression.\n", bp->dev->name);
2131 bnx2_gunzip_end(struct bnx2 *bp)
2133 kfree(bp->strm->workspace);
2138 if (bp->gunzip_buf) {
2139 vfree(bp->gunzip_buf);
2140 bp->gunzip_buf = NULL;
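/* Decompress a gzip-wrapped firmware image into the preallocated
 * gunzip buffer: validate the gzip magic, skip the optional FNAME
 * field, then raw-inflate (negative windowBits) the deflate stream.
 */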
2145 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2149 /* check gzip header */
2150 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2156 if (zbuf[3] & FNAME)
2157 while ((zbuf[n++] != 0) && (n < len));
2159 bp->strm->next_in = zbuf + n;
2160 bp->strm->avail_in = len - n;
2161 bp->strm->next_out = bp->gunzip_buf;
2162 bp->strm->avail_out = FW_BUF_SIZE;
2164 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2168 rc = zlib_inflate(bp->strm, Z_FINISH);
2170 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2171 *outbuf = bp->gunzip_buf;
2173 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2174 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2175 bp->dev->name, bp->strm->msg);
2177 zlib_inflateEnd(bp->strm);
	if (rc == Z_STREAM_END)
		return 0;
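/* Load RV2P microcode: each 64-bit instruction is written as a
 * high/low word pair and committed to instruction RAM at index i/8
 * via an ADDR_CMD write.  The processor is left in reset; it is
 * un-stalled later during chip initialization.
 */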
2186 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2193 for (i = 0; i < rv2p_code_len; i += 8) {
2194 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2196 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2199 if (rv2p_proc == RV2P_PROC1) {
2200 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2201 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2204 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2205 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2209 /* Reset the processor, un-stall is done later. */
2210 if (rv2p_proc == RV2P_PROC1) {
2211 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2214 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
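/* Halt one of the on-chip RISC processors, copy each firmware section
 * (text, data, sbss, bss, rodata) into its scratchpad relative to the
 * MIPS view base, set the program counter, and restart the CPU.
 */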
2219 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2225 val = REG_RD_IND(bp, cpu_reg->mode);
2226 val |= cpu_reg->mode_value_halt;
2227 REG_WR_IND(bp, cpu_reg->mode, val);
2228 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2230 /* Load the Text area. */
2231 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2235 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2236 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2240 /* Load the Data area. */
2241 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2245 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2246 REG_WR_IND(bp, offset, fw->data[j]);
2250 /* Load the SBSS area. */
2251 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2255 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2256 REG_WR_IND(bp, offset, fw->sbss[j]);
2260 /* Load the BSS area. */
2261 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2265 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2266 REG_WR_IND(bp, offset, fw->bss[j]);
2270 /* Load the Read-Only area. */
2271 offset = cpu_reg->spad_base +
2272 (fw->rodata_addr - cpu_reg->mips_view_base);
2276 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2277 REG_WR_IND(bp, offset, fw->rodata[j]);
2281 /* Clear the pre-fetch instruction. */
2282 REG_WR_IND(bp, cpu_reg->inst, 0);
2283 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2285 /* Start the CPU. */
2286 val = REG_RD_IND(bp, cpu_reg->mode);
2287 val &= ~cpu_reg->mode_value_halt;
2288 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2289 REG_WR_IND(bp, cpu_reg->mode, val);
2293 bnx2_init_cpus(struct bnx2 *bp)
2295 struct cpu_reg cpu_reg;
2301 if ((rc = bnx2_gunzip_init(bp)) != 0)
2304 /* Initialize the RV2P processor. */
2305 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2310 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2312 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2317 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2319 /* Initialize the RX Processor. */
2320 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2321 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2322 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2323 cpu_reg.state = BNX2_RXP_CPU_STATE;
2324 cpu_reg.state_value_clear = 0xffffff;
2325 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2326 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2327 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2328 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2329 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2330 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2331 cpu_reg.mips_view_base = 0x8000000;
2333 fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
2334 fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
2335 fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
2336 fw.start_addr = bnx2_RXP_b06FwStartAddr;
2338 fw.text_addr = bnx2_RXP_b06FwTextAddr;
2339 fw.text_len = bnx2_RXP_b06FwTextLen;
2342 rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
2349 fw.data_addr = bnx2_RXP_b06FwDataAddr;
2350 fw.data_len = bnx2_RXP_b06FwDataLen;
2352 fw.data = bnx2_RXP_b06FwData;
2354 fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
2355 fw.sbss_len = bnx2_RXP_b06FwSbssLen;
2357 fw.sbss = bnx2_RXP_b06FwSbss;
2359 fw.bss_addr = bnx2_RXP_b06FwBssAddr;
2360 fw.bss_len = bnx2_RXP_b06FwBssLen;
2362 fw.bss = bnx2_RXP_b06FwBss;
2364 fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
2365 fw.rodata_len = bnx2_RXP_b06FwRodataLen;
2366 fw.rodata_index = 0;
2367 fw.rodata = bnx2_RXP_b06FwRodata;
2369 load_cpu_fw(bp, &cpu_reg, &fw);
2371 /* Initialize the TX Processor. */
2372 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2373 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2374 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2375 cpu_reg.state = BNX2_TXP_CPU_STATE;
2376 cpu_reg.state_value_clear = 0xffffff;
2377 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2378 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2379 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2380 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2381 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2382 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2383 cpu_reg.mips_view_base = 0x8000000;
2385 fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
2386 fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
2387 fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
2388 fw.start_addr = bnx2_TXP_b06FwStartAddr;
2390 fw.text_addr = bnx2_TXP_b06FwTextAddr;
2391 fw.text_len = bnx2_TXP_b06FwTextLen;
2394 rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
2401 fw.data_addr = bnx2_TXP_b06FwDataAddr;
2402 fw.data_len = bnx2_TXP_b06FwDataLen;
2404 fw.data = bnx2_TXP_b06FwData;
2406 fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
2407 fw.sbss_len = bnx2_TXP_b06FwSbssLen;
2409 fw.sbss = bnx2_TXP_b06FwSbss;
2411 fw.bss_addr = bnx2_TXP_b06FwBssAddr;
2412 fw.bss_len = bnx2_TXP_b06FwBssLen;
2414 fw.bss = bnx2_TXP_b06FwBss;
2416 fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
2417 fw.rodata_len = bnx2_TXP_b06FwRodataLen;
2418 fw.rodata_index = 0;
2419 fw.rodata = bnx2_TXP_b06FwRodata;
2421 load_cpu_fw(bp, &cpu_reg, &fw);
2423 /* Initialize the TX Patch-up Processor. */
2424 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2425 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2426 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2427 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2428 cpu_reg.state_value_clear = 0xffffff;
2429 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2430 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2431 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2432 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2433 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2434 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2435 cpu_reg.mips_view_base = 0x8000000;
2437 fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
2438 fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
2439 fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
2440 fw.start_addr = bnx2_TPAT_b06FwStartAddr;
2442 fw.text_addr = bnx2_TPAT_b06FwTextAddr;
2443 fw.text_len = bnx2_TPAT_b06FwTextLen;
2446 rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
2453 fw.data_addr = bnx2_TPAT_b06FwDataAddr;
2454 fw.data_len = bnx2_TPAT_b06FwDataLen;
2456 fw.data = bnx2_TPAT_b06FwData;
2458 fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
2459 fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
2461 fw.sbss = bnx2_TPAT_b06FwSbss;
2463 fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
2464 fw.bss_len = bnx2_TPAT_b06FwBssLen;
2466 fw.bss = bnx2_TPAT_b06FwBss;
2468 fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
2469 fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
2470 fw.rodata_index = 0;
2471 fw.rodata = bnx2_TPAT_b06FwRodata;
2473 load_cpu_fw(bp, &cpu_reg, &fw);
2475 /* Initialize the Completion Processor. */
2476 cpu_reg.mode = BNX2_COM_CPU_MODE;
2477 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2478 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2479 cpu_reg.state = BNX2_COM_CPU_STATE;
2480 cpu_reg.state_value_clear = 0xffffff;
2481 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2482 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2483 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2484 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2485 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2486 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2487 cpu_reg.mips_view_base = 0x8000000;
2489 fw.ver_major = bnx2_COM_b06FwReleaseMajor;
2490 fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
2491 fw.ver_fix = bnx2_COM_b06FwReleaseFix;
2492 fw.start_addr = bnx2_COM_b06FwStartAddr;
2494 fw.text_addr = bnx2_COM_b06FwTextAddr;
2495 fw.text_len = bnx2_COM_b06FwTextLen;
2498 rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
2505 fw.data_addr = bnx2_COM_b06FwDataAddr;
2506 fw.data_len = bnx2_COM_b06FwDataLen;
2508 fw.data = bnx2_COM_b06FwData;
2510 fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
2511 fw.sbss_len = bnx2_COM_b06FwSbssLen;
2513 fw.sbss = bnx2_COM_b06FwSbss;
2515 fw.bss_addr = bnx2_COM_b06FwBssAddr;
2516 fw.bss_len = bnx2_COM_b06FwBssLen;
2518 fw.bss = bnx2_COM_b06FwBss;
2520 fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
2521 fw.rodata_len = bnx2_COM_b06FwRodataLen;
2522 fw.rodata_index = 0;
2523 fw.rodata = bnx2_COM_b06FwRodata;
2525 load_cpu_fw(bp, &cpu_reg, &fw);
2528 bnx2_gunzip_end(bp);
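/* Each on-chip RISC engine (the processors handled above, plus the TXP,
 * TPAT, and COM engines here) is loaded the same way: describe its
 * register block in cpu_reg, point fw at the firmware sections
 * (gunzipping the compressed .text first), then call load_cpu_fw(),
 * which is expected to halt the CPU, copy the sections into its address
 * space, and restart it at fw.start_addr.
 */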
2533 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2537 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2543 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2544 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2545 PCI_PM_CTRL_PME_STATUS);
2547 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2548 /* delay required during transition out of D3hot */
2551 val = REG_RD(bp, BNX2_EMAC_MODE);
2552 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2553 val &= ~BNX2_EMAC_MODE_MPKT;
2554 REG_WR(bp, BNX2_EMAC_MODE, val);
2556 val = REG_RD(bp, BNX2_RPM_CONFIG);
2557 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2558 REG_WR(bp, BNX2_RPM_CONFIG, val);
2569 autoneg = bp->autoneg;
2570 advertising = bp->advertising;
2572 bp->autoneg = AUTONEG_SPEED;
2573 bp->advertising = ADVERTISED_10baseT_Half |
2574 ADVERTISED_10baseT_Full |
2575 ADVERTISED_100baseT_Half |
2576 ADVERTISED_100baseT_Full |
2579 bnx2_setup_copper_phy(bp);
2581 bp->autoneg = autoneg;
2582 bp->advertising = advertising;
2584 bnx2_set_mac_addr(bp);
2586 val = REG_RD(bp, BNX2_EMAC_MODE);
2588 /* Enable port mode. */
2589 val &= ~BNX2_EMAC_MODE_PORT;
2590 val |= BNX2_EMAC_MODE_PORT_MII |
2591 BNX2_EMAC_MODE_MPKT_RCVD |
2592 BNX2_EMAC_MODE_ACPI_RCVD |
2593 BNX2_EMAC_MODE_MPKT;
2595 REG_WR(bp, BNX2_EMAC_MODE, val);
2597 /* receive all multicast */
2598 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2599 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2602 REG_WR(bp, BNX2_EMAC_RX_MODE,
2603 BNX2_EMAC_RX_MODE_SORT_MODE);
2605 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2606 BNX2_RPM_SORT_USER0_MC_EN;
2607 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2608 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2609 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2610 BNX2_RPM_SORT_USER0_ENA);
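/* The sort-user register is armed in stages: clear it, program the
 * broadcast/multicast enables, then rewrite the same value with the ENA
 * bit set so the sort rule takes effect only once fully programmed.
 */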
2612 /* Need to enable EMAC and RPM for WOL. */
2613 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2615 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2616 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2618 val = REG_RD(bp, BNX2_RPM_CONFIG);
2619 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2620 REG_WR(bp, BNX2_RPM_CONFIG, val);
2622 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2625 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2628 if (!(bp->flags & NO_WOL_FLAG))
2629 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2631 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2632 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2633 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2642 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2644 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2647 /* No more memory access after this point until
2648 * the device is brought back to D0.
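/* NVRAM helpers.  The flash sits behind a hardware arbiter shared with
 * other agents (e.g. the management firmware): callers must win the
 * SW_ARB semaphore via bnx2_acquire_nvram_lock() before touching the
 * NVM registers and release it afterwards.  Commands poll a DONE bit
 * for up to NVRAM_TIMEOUT_COUNT iterations instead of sleeping.
 */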
2660 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2665 /* Request access to the flash interface. */
2666 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2667 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2668 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2669 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2675 if (j >= NVRAM_TIMEOUT_COUNT)
2682 bnx2_release_nvram_lock(struct bnx2 *bp)
2687 /* Relinquish nvram interface. */
2688 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2690 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2691 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2692 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2698 if (j >= NVRAM_TIMEOUT_COUNT)
2706 bnx2_enable_nvram_write(struct bnx2 *bp)
2710 val = REG_RD(bp, BNX2_MISC_CFG);
2711 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2713 if (!bp->flash_info->buffered) {
2716 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2717 REG_WR(bp, BNX2_NVM_COMMAND,
2718 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2720 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2723 val = REG_RD(bp, BNX2_NVM_COMMAND);
2724 if (val & BNX2_NVM_COMMAND_DONE)
2728 if (j >= NVRAM_TIMEOUT_COUNT)
2735 bnx2_disable_nvram_write(struct bnx2 *bp)
2739 val = REG_RD(bp, BNX2_MISC_CFG);
2740 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2745 bnx2_enable_nvram_access(struct bnx2 *bp)
2749 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2750 /* Enable both bits, even on read. */
2751 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2752 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2756 bnx2_disable_nvram_access(struct bnx2 *bp)
2760 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2761 /* Disable both bits, even after read. */
2762 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2763 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2764 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2768 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2773 if (bp->flash_info->buffered)
2774 /* Buffered flash, no erase needed */
2777 /* Build an erase command */
2778 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2779 BNX2_NVM_COMMAND_DOIT;
2781 /* Need to clear DONE bit separately. */
2782 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2784 /* Address of the NVRAM to read from. */
2785 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2787 /* Issue an erase command. */
2788 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2790 /* Wait for completion. */
2791 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2796 val = REG_RD(bp, BNX2_NVM_COMMAND);
2797 if (val & BNX2_NVM_COMMAND_DONE)
2801 if (j >= NVRAM_TIMEOUT_COUNT)
2808 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2813 /* Build the command word. */
2814 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2816 /* Calculate the physical offset within a buffered flash. */
2817 if (bp->flash_info->buffered) {
2818 offset = ((offset / bp->flash_info->page_size) <<
2819 bp->flash_info->page_bits) +
2820 (offset % bp->flash_info->page_size);
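/* Worked example, assuming the 264-byte pages and 9 page bits used by
 * the buffered flash parts in this driver: linear offset 1000 becomes
 * ((1000 / 264) << 9) + (1000 % 264) = (3 << 9) + 208 = 1744, i.e.
 * byte 208 of page 3 in the device's page-addressed layout.
 */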
2823 /* Need to clear DONE bit separately. */
2824 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2826 /* Address of the NVRAM to read from. */
2827 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2829 /* Issue a read command. */
2830 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2832 /* Wait for completion. */
2833 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2838 val = REG_RD(bp, BNX2_NVM_COMMAND);
2839 if (val & BNX2_NVM_COMMAND_DONE) {
2840 val = REG_RD(bp, BNX2_NVM_READ);
2842 val = be32_to_cpu(val);
2843 memcpy(ret_val, &val, 4);
2847 if (j >= NVRAM_TIMEOUT_COUNT)
2855 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2860 /* Build the command word. */
2861 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2863 /* Calculate the physical offset within a buffered flash. */
2864 if (bp->flash_info->buffered) {
2865 offset = ((offset / bp->flash_info->page_size) <<
2866 bp->flash_info->page_bits) +
2867 (offset % bp->flash_info->page_size);
2870 /* Need to clear DONE bit separately. */
2871 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2873 memcpy(&val32, val, 4);
2874 val32 = cpu_to_be32(val32);
2876 /* Write the data. */
2877 REG_WR(bp, BNX2_NVM_WRITE, val32);
2879 /* Address of the NVRAM to write to. */
2880 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2882 /* Issue the write command. */
2883 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2885 /* Wait for completion. */
2886 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2889 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2892 if (j >= NVRAM_TIMEOUT_COUNT)
2899 bnx2_init_nvram(struct bnx2 *bp)
2902 int j, entry_count, rc;
2903 struct flash_spec *flash;
2905 /* Determine the selected interface. */
2906 val = REG_RD(bp, BNX2_NVM_CFG1);
2908 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2911 if (val & 0x40000000) {
2913 /* Flash interface has been reconfigured */
2914 for (j = 0, flash = &flash_table[0]; j < entry_count;
2916 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2917 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2918 bp->flash_info = flash;
2925 /* Not yet reconfigured */
2927 if (val & (1 << 23))
2928 mask = FLASH_BACKUP_STRAP_MASK;
2930 mask = FLASH_STRAP_MASK;
2932 for (j = 0, flash = &flash_table[0]; j < entry_count;
2935 if ((val & mask) == (flash->strapping & mask)) {
2936 bp->flash_info = flash;
2938 /* Request access to the flash interface. */
2939 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2942 /* Enable access to flash interface */
2943 bnx2_enable_nvram_access(bp);
2945 /* Reconfigure the flash interface */
2946 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2947 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2948 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2949 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2951 /* Disable access to flash interface */
2952 bnx2_disable_nvram_access(bp);
2953 bnx2_release_nvram_lock(bp);
2958 } /* if (val & 0x40000000) */
2960 if (j == entry_count) {
2961 bp->flash_info = NULL;
2962 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2966 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2967 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2969 bp->flash_size = val;
2971 bp->flash_size = bp->flash_info->total_size;
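/* Summary: the strapping bits read from NVM_CFG1 select a flash_table
 * entry.  If bit 30 is set, the interface was already reconfigured
 * (e.g. by the bootcode) and only the backup-strap bits are matched;
 * otherwise the chip is reprogrammed with the matching entry's config
 * words.  The size reported in shared memory, when present, overrides
 * the table's total_size.
 */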
2977 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2981 u32 cmd_flags, offset32, len32, extra;
2986 /* Request access to the flash interface. */
2987 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2990 /* Enable access to flash interface */
2991 bnx2_enable_nvram_access(bp);
3004 pre_len = 4 - (offset & 3);
3006 if (pre_len >= len32) {
3008 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3009 BNX2_NVM_COMMAND_LAST;
3012 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3015 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3020 memcpy(ret_buf, buf + (offset & 3), pre_len);
3027 extra = 4 - (len32 & 3);
3028 len32 = (len32 + 4) & ~3;
3035 cmd_flags = BNX2_NVM_COMMAND_LAST;
3037 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3038 BNX2_NVM_COMMAND_LAST;
3040 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3042 memcpy(ret_buf, buf, 4 - extra);
3044 else if (len32 > 0) {
3047 /* Read the first word. */
3051 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3053 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3055 /* Advance to the next dword. */
3060 while (len32 > 4 && rc == 0) {
3061 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3063 /* Advance to the next dword. */
3072 cmd_flags = BNX2_NVM_COMMAND_LAST;
3073 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3075 memcpy(ret_buf, buf, 4 - extra);
3078 /* Disable access to flash interface */
3079 bnx2_disable_nvram_access(bp);
3081 bnx2_release_nvram_lock(bp);
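/* bnx2_nvram_read() operates on whole dwords: a misaligned head is read
 * into a scratch buffer and copied starting at (offset & 3), a ragged
 * tail is rounded up and trimmed via memcpy(ret_buf, buf, 4 - extra),
 * and the FIRST/LAST command flags bracket the burst for the controller.
 */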
3087 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3090 u32 written, offset32, len32;
3091 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3093 int align_start, align_end;
3098 align_start = align_end = 0;
3100 if ((align_start = (offset32 & 3))) {
3102 len32 += align_start;
3103 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3108 if ((len32 > 4) || !align_start) {
3109 align_end = 4 - (len32 & 3);
3111 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3118 if (align_start || align_end) {
3119 buf = kmalloc(len32, GFP_KERNEL);
3123 memcpy(buf, start, 4);
3126 memcpy(buf + len32 - 4, end, 4);
3128 memcpy(buf + align_start, data_buf, buf_size);
3131 if (bp->flash_info->buffered == 0) {
3132 flash_buffer = kmalloc(264, GFP_KERNEL);
3133 if (flash_buffer == NULL) {
3135 goto nvram_write_end;
3140 while ((written < len32) && (rc == 0)) {
3141 u32 page_start, page_end, data_start, data_end;
3142 u32 addr, cmd_flags;
3145 /* Find the page_start addr */
3146 page_start = offset32 + written;
3147 page_start -= (page_start % bp->flash_info->page_size);
3148 /* Find the page_end addr */
3149 page_end = page_start + bp->flash_info->page_size;
3150 /* Find the data_start addr */
3151 data_start = (written == 0) ? offset32 : page_start;
3152 /* Find the data_end addr */
3153 data_end = (page_end > offset32 + len32) ?
3154 (offset32 + len32) : page_end;
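/* Example, assuming 264-byte pages: offset32 = 700 and len32 = 600 give
 * page_start = 528, page_end = 792, data_start = 700 (first pass only)
 * and data_end = 792, so this iteration writes bytes 700-791 and
 * "written" advances by 92.
 */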
3156 /* Request access to the flash interface. */
3157 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3158 goto nvram_write_end;
3160 /* Enable access to flash interface */
3161 bnx2_enable_nvram_access(bp);
3163 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3164 if (bp->flash_info->buffered == 0) {
3167 /* Read the whole page into the buffer
3168 * (non-buffered flash only) */
3169 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3170 if (j == (bp->flash_info->page_size - 4)) {
3171 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3173 rc = bnx2_nvram_read_dword(bp,
3179 goto nvram_write_end;
3185 /* Enable writes to flash interface (unlock write-protect) */
3186 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3187 goto nvram_write_end;
3189 /* Erase the page */
3190 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3191 goto nvram_write_end;
3194 /* Re-enable writes for the actual data write that follows */
3194 bnx2_enable_nvram_write(bp);
3196 /* Loop to write back the buffer data from page_start to
3199 if (bp->flash_info->buffered == 0) {
3200 for (addr = page_start; addr < data_start;
3201 addr += 4, i += 4) {
3203 rc = bnx2_nvram_write_dword(bp, addr,
3204 &flash_buffer[i], cmd_flags);
3207 goto nvram_write_end;
3213 /* Loop to write the new data from data_start to data_end */
3214 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3215 if ((addr == page_end - 4) ||
3216 ((bp->flash_info->buffered) &&
3217 (addr == data_end - 4))) {
3219 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3221 rc = bnx2_nvram_write_dword(bp, addr, buf,
3225 goto nvram_write_end;
3231 /* Loop to write back the buffer data from data_end
3233 if (bp->flash_info->buffered == 0) {
3234 for (addr = data_end; addr < page_end;
3235 addr += 4, i += 4) {
3237 if (addr == page_end-4) {
3238 cmd_flags = BNX2_NVM_COMMAND_LAST;
3240 rc = bnx2_nvram_write_dword(bp, addr,
3241 &flash_buffer[i], cmd_flags);
3244 goto nvram_write_end;
3250 /* Disable writes to flash interface (lock write-protect) */
3251 bnx2_disable_nvram_write(bp);
3253 /* Disable access to flash interface */
3254 bnx2_disable_nvram_access(bp);
3255 bnx2_release_nvram_lock(bp);
3257 /* Increment written */
3258 written += data_end - data_start;
3262 if (bp->flash_info->buffered == 0)
3263 kfree(flash_buffer);
3265 if (align_start || align_end)
3271 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3276 /* Wait for the current PCI transaction to complete before
3277 * issuing a reset. */
3278 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3279 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3280 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3281 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3282 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3283 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3286 /* Wait for the firmware to tell us it is ok to issue a reset. */
3287 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3289 /* Deposit a driver reset signature so the firmware knows that
3290 * this is a soft reset. */
3291 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3292 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3294 /* Do a dummy read to force the chip to complete all current transactions
3295 * before we issue a reset. */
3296 val = REG_RD(bp, BNX2_MISC_ID);
3298 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3299 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3300 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3303 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3305 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3306 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3309 /* Reset takes approximately 30 usec */
3310 for (i = 0; i < 10; i++) {
3311 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3312 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3313 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3319 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3320 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3321 printk(KERN_ERR PFX "Chip reset did not complete\n");
3325 /* Make sure byte swapping is properly configured. */
3326 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3327 if (val != 0x01020304) {
3328 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3332 /* Wait for the firmware to finish its initialization. */
3333 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3337 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3338 /* Adjust the voltage regulator to two steps lower. The default
3339 * of this register is 0x0000000e. */
3340 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3342 /* Remove bad rbuf memory from the free pool. */
3343 rc = bnx2_alloc_bad_rbuf(bp);
3350 bnx2_init_chip(struct bnx2 *bp)
3355 /* Make sure the interrupt is not active. */
3356 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3358 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3359 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3361 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3363 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3364 DMA_READ_CHANS << 12 |
3365 DMA_WRITE_CHANS << 16;
3367 val |= (0x2 << 20) | (1 << 11);
3369 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3372 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3373 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3374 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3376 REG_WR(bp, BNX2_DMA_CONFIG, val);
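/* DMA_CONFIG combines the byte/word swap controls with the number of
 * read and write DMA channels at bits 12 and 16.  The raw
 * (0x2 << 20) | (1 << 11) term sets additional controller tuning fields
 * that the original code leaves as magic values.
 */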
3378 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3379 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3380 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3381 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3384 if (bp->flags & PCIX_FLAG) {
3387 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3389 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3390 val16 & ~PCI_X_CMD_ERO);
3393 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3394 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3395 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3396 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3398 /* Initialize context mapping and zero out the quick contexts. The
3399 * context block must have already been enabled. */
3400 bnx2_init_context(bp);
3402 if ((rc = bnx2_init_cpus(bp)) != 0)
3405 bnx2_init_nvram(bp);
3407 bnx2_set_mac_addr(bp);
3409 val = REG_RD(bp, BNX2_MQ_CONFIG);
3410 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3411 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3412 REG_WR(bp, BNX2_MQ_CONFIG, val);
3414 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3415 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3416 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3418 val = (BCM_PAGE_BITS - 8) << 24;
3419 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3421 /* Configure page size. */
3422 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3423 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3424 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3425 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3427 val = bp->mac_addr[0] +
3428 (bp->mac_addr[1] << 8) +
3429 (bp->mac_addr[2] << 16) +
3431 (bp->mac_addr[4] << 8) +
3432 (bp->mac_addr[5] << 16);
3433 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
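/* Seeding the collision backoff generator from the MAC address should
 * make the pseudo-random backoff sequence differ between ports that
 * would otherwise reset to identical state.
 */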
3435 /* Program the MTU. Also include 4 bytes for CRC32. */
3436 val = bp->dev->mtu + ETH_HLEN + 4;
3437 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3438 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3439 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3441 bp->last_status_idx = 0;
3442 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3444 /* Set up how to generate a link change interrupt. */
3445 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3447 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3448 (u64) bp->status_blk_mapping & 0xffffffff);
3449 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3451 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3452 (u64) bp->stats_blk_mapping & 0xffffffff);
3453 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3454 (u64) bp->stats_blk_mapping >> 32);
3456 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3457 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3459 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3460 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3462 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3463 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3465 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3467 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3469 REG_WR(bp, BNX2_HC_COM_TICKS,
3470 (bp->com_ticks_int << 16) | bp->com_ticks);
3472 REG_WR(bp, BNX2_HC_CMD_TICKS,
3473 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3475 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3476 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3478 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3479 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3481 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3482 BNX2_HC_CONFIG_TX_TMR_MODE |
3483 BNX2_HC_CONFIG_COLLECT_STATS);
3486 /* Clear internal stats counters. */
3487 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3489 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3491 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3492 BNX2_PORT_FEATURE_ASF_ENABLED)
3493 bp->flags |= ASF_ENABLE_FLAG;
3495 /* Initialize the receive filter. */
3496 bnx2_set_rx_mode(bp->dev);
3498 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3501 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3502 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3506 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3513 bnx2_init_tx_ring(struct bnx2 *bp)
3518 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3520 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3522 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3523 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
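/* The entry at MAX_TX_DESC_CNT acts as a chain BD pointing back at the
 * physical base of the ring, so the hardware sees the single page of TX
 * descriptors as a circular list.
 */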
3528 bp->tx_prod_bseq = 0;
3530 val = BNX2_L2CTX_TYPE_TYPE_L2;
3531 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3532 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3534 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3536 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3538 val = (u64) bp->tx_desc_mapping >> 32;
3539 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3541 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3542 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3546 bnx2_init_rx_ring(struct bnx2 *bp)
3550 u16 prod, ring_prod;
3553 /* 8 for CRC and VLAN */
3554 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3555 /* 8 for alignment */
3556 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3558 ring_prod = prod = bp->rx_prod = 0;
3561 bp->rx_prod_bseq = 0;
3563 for (i = 0; i < bp->rx_max_ring; i++) {
3566 rxbd = &bp->rx_desc_ring[i][0];
3567 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3568 rxbd->rx_bd_len = bp->rx_buf_use_size;
3569 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3571 if (i == (bp->rx_max_ring - 1))
3575 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3576 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3580 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3581 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3583 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3585 val = (u64) bp->rx_desc_mapping[0] >> 32;
3586 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3588 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3589 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3591 for (i = 0; i < bp->rx_ring_size; i++) {
3592 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3595 prod = NEXT_RX_BD(prod);
3596 ring_prod = RX_RING_IDX(prod);
3600 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3602 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
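/* These two mailbox writes publish the host RX state to the chip: the
 * 16-bit producer index says how many buffer descriptors are valid, and
 * the byte sequence (bseq) tracks the cumulative bytes posted.
 */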
3606 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3610 bp->rx_ring_size = size;
3612 while (size > MAX_RX_DESC_CNT) {
3613 size -= MAX_RX_DESC_CNT;
3616 /* round to next power of 2 */
3618 while ((max & num_rings) == 0)
3621 if (num_rings != max)
3624 bp->rx_max_ring = max;
3625 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
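/* Example, assuming MAX_RX_DESC_CNT is 255: a requested size of 600
 * spans three ring pages, and the power-of-two rounding above yields
 * rx_max_ring = 4 chained pages.
 */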
3629 bnx2_free_tx_skbs(struct bnx2 *bp)
3633 if (bp->tx_buf_ring == NULL)
3636 for (i = 0; i < TX_DESC_CNT; ) {
3637 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3638 struct sk_buff *skb = tx_buf->skb;
3646 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3647 skb_headlen(skb), PCI_DMA_TODEVICE);
3651 last = skb_shinfo(skb)->nr_frags;
3652 for (j = 0; j < last; j++) {
3653 tx_buf = &bp->tx_buf_ring[i + j + 1];
3654 pci_unmap_page(bp->pdev,
3655 pci_unmap_addr(tx_buf, mapping),
3656 skb_shinfo(skb)->frags[j].size,
3666 bnx2_free_rx_skbs(struct bnx2 *bp)
3670 if (bp->rx_buf_ring == NULL)
3673 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3674 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3675 struct sk_buff *skb = rx_buf->skb;
3680 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3681 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3690 bnx2_free_skbs(struct bnx2 *bp)
3692 bnx2_free_tx_skbs(bp);
3693 bnx2_free_rx_skbs(bp);
3697 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3701 rc = bnx2_reset_chip(bp, reset_code);
3706 if ((rc = bnx2_init_chip(bp)) != 0)
3709 bnx2_init_tx_ring(bp);
3710 bnx2_init_rx_ring(bp);
3715 bnx2_init_nic(struct bnx2 *bp)
3719 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3722 spin_lock_bh(&bp->phy_lock);
3724 spin_unlock_bh(&bp->phy_lock);
3730 bnx2_test_registers(struct bnx2 *bp)
3734 static const struct {
3740 { 0x006c, 0, 0x00000000, 0x0000003f },
3741 { 0x0090, 0, 0xffffffff, 0x00000000 },
3742 { 0x0094, 0, 0x00000000, 0x00000000 },
3744 { 0x0404, 0, 0x00003f00, 0x00000000 },
3745 { 0x0418, 0, 0x00000000, 0xffffffff },
3746 { 0x041c, 0, 0x00000000, 0xffffffff },
3747 { 0x0420, 0, 0x00000000, 0x80ffffff },
3748 { 0x0424, 0, 0x00000000, 0x00000000 },
3749 { 0x0428, 0, 0x00000000, 0x00000001 },
3750 { 0x0450, 0, 0x00000000, 0x0000ffff },
3751 { 0x0454, 0, 0x00000000, 0xffffffff },
3752 { 0x0458, 0, 0x00000000, 0xffffffff },
3754 { 0x0808, 0, 0x00000000, 0xffffffff },
3755 { 0x0854, 0, 0x00000000, 0xffffffff },
3756 { 0x0868, 0, 0x00000000, 0x77777777 },
3757 { 0x086c, 0, 0x00000000, 0x77777777 },
3758 { 0x0870, 0, 0x00000000, 0x77777777 },
3759 { 0x0874, 0, 0x00000000, 0x77777777 },
3761 { 0x0c00, 0, 0x00000000, 0x00000001 },
3762 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3763 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3765 { 0x1000, 0, 0x00000000, 0x00000001 },
3766 { 0x1004, 0, 0x00000000, 0x000f0001 },
3768 { 0x1408, 0, 0x01c00800, 0x00000000 },
3769 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3770 { 0x14a8, 0, 0x00000000, 0x000001ff },
3771 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3772 { 0x14b0, 0, 0x00000002, 0x00000001 },
3773 { 0x14b8, 0, 0x00000000, 0x00000000 },
3774 { 0x14c0, 0, 0x00000000, 0x00000009 },
3775 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3776 { 0x14cc, 0, 0x00000000, 0x00000001 },
3777 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3779 { 0x1800, 0, 0x00000000, 0x00000001 },
3780 { 0x1804, 0, 0x00000000, 0x00000003 },
3782 { 0x2800, 0, 0x00000000, 0x00000001 },
3783 { 0x2804, 0, 0x00000000, 0x00003f01 },
3784 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3785 { 0x2810, 0, 0xffff0000, 0x00000000 },
3786 { 0x2814, 0, 0xffff0000, 0x00000000 },
3787 { 0x2818, 0, 0xffff0000, 0x00000000 },
3788 { 0x281c, 0, 0xffff0000, 0x00000000 },
3789 { 0x2834, 0, 0xffffffff, 0x00000000 },
3790 { 0x2840, 0, 0x00000000, 0xffffffff },
3791 { 0x2844, 0, 0x00000000, 0xffffffff },
3792 { 0x2848, 0, 0xffffffff, 0x00000000 },
3793 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3795 { 0x2c00, 0, 0x00000000, 0x00000011 },
3796 { 0x2c04, 0, 0x00000000, 0x00030007 },
3798 { 0x3c00, 0, 0x00000000, 0x00000001 },
3799 { 0x3c04, 0, 0x00000000, 0x00070000 },
3800 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3801 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3802 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3803 { 0x3c14, 0, 0x00000000, 0xffffffff },
3804 { 0x3c18, 0, 0x00000000, 0xffffffff },
3805 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3806 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3808 { 0x5004, 0, 0x00000000, 0x0000007f },
3809 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3810 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3812 { 0x5c00, 0, 0x00000000, 0x00000001 },
3813 { 0x5c04, 0, 0x00000000, 0x0003000f },
3814 { 0x5c08, 0, 0x00000003, 0x00000000 },
3815 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3816 { 0x5c10, 0, 0x00000000, 0xffffffff },
3817 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3818 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3819 { 0x5c88, 0, 0x00000000, 0x00077373 },
3820 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3822 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3823 { 0x680c, 0, 0xffffffff, 0x00000000 },
3824 { 0x6810, 0, 0xffffffff, 0x00000000 },
3825 { 0x6814, 0, 0xffffffff, 0x00000000 },
3826 { 0x6818, 0, 0xffffffff, 0x00000000 },
3827 { 0x681c, 0, 0xffffffff, 0x00000000 },
3828 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3829 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3830 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3831 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3832 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3833 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3834 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3835 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3836 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3837 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3838 { 0x684c, 0, 0xffffffff, 0x00000000 },
3839 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3840 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3841 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3842 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3843 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3844 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3846 { 0xffff, 0, 0x00000000, 0x00000000 },
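/* Each entry is { offset, flags, rw_mask, ro_mask }.  The loop below
 * writes 0 and then 0xffffffff to each register: bits in rw_mask must
 * follow the written value, bits in ro_mask must keep the previously
 * saved value, and the 0xffff offset terminates the table.
 */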
3850 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3851 u32 offset, rw_mask, ro_mask, save_val, val;
3853 offset = (u32) reg_tbl[i].offset;
3854 rw_mask = reg_tbl[i].rw_mask;
3855 ro_mask = reg_tbl[i].ro_mask;
3857 save_val = readl(bp->regview + offset);
3859 writel(0, bp->regview + offset);
3861 val = readl(bp->regview + offset);
3862 if ((val & rw_mask) != 0) {
3866 if ((val & ro_mask) != (save_val & ro_mask)) {
3870 writel(0xffffffff, bp->regview + offset);
3872 val = readl(bp->regview + offset);
3873 if ((val & rw_mask) != rw_mask) {
3877 if ((val & ro_mask) != (save_val & ro_mask)) {
3881 writel(save_val, bp->regview + offset);
3885 writel(save_val, bp->regview + offset);
3893 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3895 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3896 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3899 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3902 for (offset = 0; offset < size; offset += 4) {
3904 REG_WR_IND(bp, start + offset, test_pattern[i]);
3906 if (REG_RD_IND(bp, start + offset) !=
3916 bnx2_test_memory(struct bnx2 *bp)
3920 static const struct {
3924 { 0x60000, 0x4000 },
3925 { 0xa0000, 0x3000 },
3926 { 0xe0000, 0x4000 },
3927 { 0x120000, 0x4000 },
3928 { 0x1a0000, 0x4000 },
3929 { 0x160000, 0x4000 },
3933 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3934 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3935 mem_tbl[i].len)) != 0) {
3943 #define BNX2_MAC_LOOPBACK 0
3944 #define BNX2_PHY_LOOPBACK 1
3947 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3949 unsigned int pkt_size, num_pkts, i;
3950 struct sk_buff *skb, *rx_skb;
3951 unsigned char *packet;
3952 u16 rx_start_idx, rx_idx;
3955 struct sw_bd *rx_buf;
3956 struct l2_fhdr *rx_hdr;
3959 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3960 bp->loopback = MAC_LOOPBACK;
3961 bnx2_set_mac_loopback(bp);
3963 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3964 bp->loopback = PHY_LOOPBACK;
3965 bnx2_set_phy_loopback(bp);
3971 skb = netdev_alloc_skb(bp->dev, pkt_size);
3974 packet = skb_put(skb, pkt_size);
3975 memcpy(packet, bp->mac_addr, 6);
3976 memset(packet + 6, 0x0, 8);
3977 for (i = 14; i < pkt_size; i++)
3978 packet[i] = (unsigned char) (i & 0xff);
3980 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3983 REG_WR(bp, BNX2_HC_COMMAND,
3984 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3986 REG_RD(bp, BNX2_HC_COMMAND);
3989 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3993 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3995 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3996 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3997 txbd->tx_bd_mss_nbytes = pkt_size;
3998 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4001 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4002 bp->tx_prod_bseq += pkt_size;
4004 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
4005 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4009 REG_WR(bp, BNX2_HC_COMMAND,
4010 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4012 REG_RD(bp, BNX2_HC_COMMAND);
4016 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4019 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4020 goto loopback_test_done;
4023 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4024 if (rx_idx != rx_start_idx + num_pkts) {
4025 goto loopback_test_done;
4028 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4029 rx_skb = rx_buf->skb;
4031 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4032 skb_reserve(rx_skb, bp->rx_offset);
4034 pci_dma_sync_single_for_cpu(bp->pdev,
4035 pci_unmap_addr(rx_buf, mapping),
4036 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4038 if (rx_hdr->l2_fhdr_status &
4039 (L2_FHDR_ERRORS_BAD_CRC |
4040 L2_FHDR_ERRORS_PHY_DECODE |
4041 L2_FHDR_ERRORS_ALIGNMENT |
4042 L2_FHDR_ERRORS_TOO_SHORT |
4043 L2_FHDR_ERRORS_GIANT_FRAME)) {
4045 goto loopback_test_done;
4048 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4049 goto loopback_test_done;
4052 for (i = 14; i < pkt_size; i++) {
4053 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4054 goto loopback_test_done;
4065 #define BNX2_MAC_LOOPBACK_FAILED 1
4066 #define BNX2_PHY_LOOPBACK_FAILED 2
4067 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4068 BNX2_PHY_LOOPBACK_FAILED)
4071 bnx2_test_loopback(struct bnx2 *bp)
4075 if (!netif_running(bp->dev))
4076 return BNX2_LOOPBACK_FAILED;
4078 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4079 spin_lock_bh(&bp->phy_lock);
4081 spin_unlock_bh(&bp->phy_lock);
4082 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4083 rc |= BNX2_MAC_LOOPBACK_FAILED;
4084 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4085 rc |= BNX2_PHY_LOOPBACK_FAILED;
4089 #define NVRAM_SIZE 0x200
4090 #define CRC32_RESIDUAL 0xdebb20e3
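/* 0xdebb20e3 is the CRC-32 residual for implementations without a
 * final inversion, such as ether_crc_le() used here: running the CRC
 * over a block that ends with its own little-endian CRC yields this
 * constant, so each 0x100-byte region can be verified in place.
 */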
4093 bnx2_test_nvram(struct bnx2 *bp)
4095 u32 buf[NVRAM_SIZE / 4];
4096 u8 *data = (u8 *) buf;
4100 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4101 goto test_nvram_done;
4103 magic = be32_to_cpu(buf[0]);
4104 if (magic != 0x669955aa) {
4106 goto test_nvram_done;
4109 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4110 goto test_nvram_done;
4112 csum = ether_crc_le(0x100, data);
4113 if (csum != CRC32_RESIDUAL) {
4115 goto test_nvram_done;
4118 csum = ether_crc_le(0x100, data + 0x100);
4119 if (csum != CRC32_RESIDUAL) {
4128 bnx2_test_link(struct bnx2 *bp)
4132 spin_lock_bh(&bp->phy_lock);
4133 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4134 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4135 spin_unlock_bh(&bp->phy_lock);
4137 if (bmsr & BMSR_LSTATUS) {
4144 bnx2_test_intr(struct bnx2 *bp)
4149 if (!netif_running(bp->dev))
4152 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4154 /* This register is not touched during run-time. */
4155 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4156 REG_RD(bp, BNX2_HC_COMMAND);
4158 for (i = 0; i < 10; i++) {
4159 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4165 msleep_interruptible(10);
4174 bnx2_5706_serdes_timer(struct bnx2 *bp)
4176 spin_lock(&bp->phy_lock);
4177 if (bp->serdes_an_pending)
4178 bp->serdes_an_pending--;
4179 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4182 bp->current_interval = bp->timer_interval;
4184 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4186 if (bmcr & BMCR_ANENABLE) {
4189 bnx2_write_phy(bp, 0x1c, 0x7c00);
4190 bnx2_read_phy(bp, 0x1c, &phy1);
4192 bnx2_write_phy(bp, 0x17, 0x0f01);
4193 bnx2_read_phy(bp, 0x15, &phy2);
4194 bnx2_write_phy(bp, 0x17, 0x0f01);
4195 bnx2_read_phy(bp, 0x15, &phy2);
4197 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4198 !(phy2 & 0x20)) { /* no CONFIG */
4200 bmcr &= ~BMCR_ANENABLE;
4201 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4202 bnx2_write_phy(bp, MII_BMCR, bmcr);
4203 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4207 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4208 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4211 bnx2_write_phy(bp, 0x17, 0x0f01);
4212 bnx2_read_phy(bp, 0x15, &phy2);
4216 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4217 bmcr |= BMCR_ANENABLE;
4218 bnx2_write_phy(bp, MII_BMCR, bmcr);
4220 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4223 bp->current_interval = bp->timer_interval;
4225 spin_unlock(&bp->phy_lock);
4229 bnx2_5708_serdes_timer(struct bnx2 *bp)
4231 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4232 bp->serdes_an_pending = 0;
4236 spin_lock(&bp->phy_lock);
4237 if (bp->serdes_an_pending)
4238 bp->serdes_an_pending--;
4239 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4242 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4244 if (bmcr & BMCR_ANENABLE) {
4245 bmcr &= ~BMCR_ANENABLE;
4246 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4247 bnx2_write_phy(bp, MII_BMCR, bmcr);
4248 bp->current_interval = SERDES_FORCED_TIMEOUT;
4250 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4251 bmcr |= BMCR_ANENABLE;
4252 bnx2_write_phy(bp, MII_BMCR, bmcr);
4253 bp->serdes_an_pending = 2;
4254 bp->current_interval = bp->timer_interval;
4258 bp->current_interval = bp->timer_interval;
4260 spin_unlock(&bp->phy_lock);
4264 bnx2_timer(unsigned long data)
4266 struct bnx2 *bp = (struct bnx2 *) data;
4269 if (!netif_running(bp->dev))
4272 if (atomic_read(&bp->intr_sem) != 0)
4273 goto bnx2_restart_timer;
4275 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4276 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
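/* Heartbeat: the driver bumps a pulse sequence number in shared memory
 * on every timer tick so the bootcode can tell the host driver is still
 * alive; a stalled pulse lets the firmware take corrective action.
 */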
4278 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4280 if (bp->phy_flags & PHY_SERDES_FLAG) {
4281 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4282 bnx2_5706_serdes_timer(bp);
4283 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4284 bnx2_5708_serdes_timer(bp);
4288 mod_timer(&bp->timer, jiffies + bp->current_interval);
4291 /* Called with rtnl_lock */
4293 bnx2_open(struct net_device *dev)
4295 struct bnx2 *bp = netdev_priv(dev);
4298 bnx2_set_power_state(bp, PCI_D0);
4299 bnx2_disable_int(bp);
4301 rc = bnx2_alloc_mem(bp);
4305 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4306 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4309 if (pci_enable_msi(bp->pdev) == 0) {
4310 bp->flags |= USING_MSI_FLAG;
4311 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4315 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4316 IRQF_SHARED, dev->name, dev);
4320 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4328 rc = bnx2_init_nic(bp);
4331 free_irq(bp->pdev->irq, dev);
4332 if (bp->flags & USING_MSI_FLAG) {
4333 pci_disable_msi(bp->pdev);
4334 bp->flags &= ~USING_MSI_FLAG;
4341 mod_timer(&bp->timer, jiffies + bp->current_interval);
4343 atomic_set(&bp->intr_sem, 0);
4345 bnx2_enable_int(bp);
4347 if (bp->flags & USING_MSI_FLAG) {
4348 /* Test MSI to make sure it is working.
4349 * If the MSI test fails, go back to INTx mode.
4351 if (bnx2_test_intr(bp) != 0) {
4352 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4353 " using MSI, switching to INTx mode. Please"
4354 " report this failure to the PCI maintainer"
4355 " and include system chipset information.\n",
4358 bnx2_disable_int(bp);
4359 free_irq(bp->pdev->irq, dev);
4360 pci_disable_msi(bp->pdev);
4361 bp->flags &= ~USING_MSI_FLAG;
4363 rc = bnx2_init_nic(bp);
4366 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4367 IRQF_SHARED, dev->name, dev);
4372 del_timer_sync(&bp->timer);
4375 bnx2_enable_int(bp);
4378 if (bp->flags & USING_MSI_FLAG) {
4379 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4382 netif_start_queue(dev);
4388 bnx2_reset_task(void *data)
4390 struct bnx2 *bp = data;
4392 if (!netif_running(bp->dev))
4395 bp->in_reset_task = 1;
4396 bnx2_netif_stop(bp);
4400 atomic_set(&bp->intr_sem, 1);
4401 bnx2_netif_start(bp);
4402 bp->in_reset_task = 0;
4406 bnx2_tx_timeout(struct net_device *dev)
4408 struct bnx2 *bp = netdev_priv(dev);
4410 /* This allows the netif to be shut down gracefully before resetting */
4411 schedule_work(&bp->reset_task);
4415 /* Called with rtnl_lock */
4417 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4419 struct bnx2 *bp = netdev_priv(dev);
4421 bnx2_netif_stop(bp);
4424 bnx2_set_rx_mode(dev);
4426 bnx2_netif_start(bp);
4429 /* Called with rtnl_lock */
4431 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4433 struct bnx2 *bp = netdev_priv(dev);
4435 bnx2_netif_stop(bp);
4438 bp->vlgrp->vlan_devices[vid] = NULL;
4439 bnx2_set_rx_mode(dev);
4441 bnx2_netif_start(bp);
4445 /* Called with netif_tx_lock.
4446 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4447 * netif_wake_queue().
4450 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4452 struct bnx2 *bp = netdev_priv(dev);
4455 struct sw_bd *tx_buf;
4456 u32 len, vlan_tag_flags, last_frag, mss;
4457 u16 prod, ring_prod;
4460 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4461 netif_stop_queue(dev);
4462 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4465 return NETDEV_TX_BUSY;
4467 len = skb_headlen(skb);
4469 ring_prod = TX_RING_IDX(prod);
4472 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4473 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4476 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4478 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4481 if ((mss = skb_shinfo(skb)->gso_size) &&
4482 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4483 u32 tcp_opt_len, ip_tcp_len;
4485 if (skb_header_cloned(skb) &&
4486 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4488 return NETDEV_TX_OK;
4491 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4492 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4495 if (skb->h.th->doff > 5) {
4496 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4498 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4500 skb->nh.iph->check = 0;
4501 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4503 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4507 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4508 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4509 (tcp_opt_len >> 2)) << 8;
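/* The hardware wants the extra header length in 32-bit words: IP
 * options contribute (ihl - 5) words and TCP options (tcp_opt_len >> 2)
 * words, packed into the flags at bit 8.  E.g. ihl = 6 plus a 12-byte
 * TCP option block encodes as (1 + 3) << 8.
 */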
4518 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4520 tx_buf = &bp->tx_buf_ring[ring_prod];
4522 pci_unmap_addr_set(tx_buf, mapping, mapping);
4524 txbd = &bp->tx_desc_ring[ring_prod];
4526 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4527 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4528 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4529 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4531 last_frag = skb_shinfo(skb)->nr_frags;
4533 for (i = 0; i < last_frag; i++) {
4534 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4536 prod = NEXT_TX_BD(prod);
4537 ring_prod = TX_RING_IDX(prod);
4538 txbd = &bp->tx_desc_ring[ring_prod];
4541 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4542 len, PCI_DMA_TODEVICE);
4543 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4546 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4547 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4548 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4549 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4552 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4554 prod = NEXT_TX_BD(prod);
4555 bp->tx_prod_bseq += skb->len;
4557 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4558 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4563 dev->trans_start = jiffies;
4565 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4566 netif_stop_queue(dev);
4567 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4568 netif_wake_queue(dev);
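/* The queue is stopped first and availability re-checked afterwards;
 * this closes the race with bnx2_tx_int(), which may free enough
 * descriptors between the check and the stop to justify an immediate
 * wake.
 */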
4571 return NETDEV_TX_OK;
4574 /* Called with rtnl_lock */
4576 bnx2_close(struct net_device *dev)
4578 struct bnx2 *bp = netdev_priv(dev);
4581 /* Calling flush_scheduled_work() may deadlock because
4582 * linkwatch_event() may be on the workqueue and it will try to get
4583 * the rtnl_lock which we are holding.
4585 while (bp->in_reset_task)
4588 bnx2_netif_stop(bp);
4589 del_timer_sync(&bp->timer);
4590 if (bp->flags & NO_WOL_FLAG)
4591 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4593 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4595 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4596 bnx2_reset_chip(bp, reset_code);
4597 free_irq(bp->pdev->irq, dev);
4598 if (bp->flags & USING_MSI_FLAG) {
4599 pci_disable_msi(bp->pdev);
4600 bp->flags &= ~USING_MSI_FLAG;
4605 netif_carrier_off(bp->dev);
4606 bnx2_set_power_state(bp, PCI_D3hot);
4610 #define GET_NET_STATS64(ctr) \
4611 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4612 (unsigned long) (ctr##_lo)
4614 #define GET_NET_STATS32(ctr) \
4617 #if (BITS_PER_LONG == 64)
4618 #define GET_NET_STATS GET_NET_STATS64
4620 #define GET_NET_STATS GET_NET_STATS32
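/* The chip exposes 64-bit counters as _hi/_lo register pairs.  64-bit
 * kernels combine both halves; 32-bit kernels report only the low word,
 * since unsigned long cannot hold the full 64-bit value.
 */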
4623 static struct net_device_stats *
4624 bnx2_get_stats(struct net_device *dev)
4626 struct bnx2 *bp = netdev_priv(dev);
4627 struct statistics_block *stats_blk = bp->stats_blk;
4628 struct net_device_stats *net_stats = &bp->net_stats;
4630 if (bp->stats_blk == NULL) {
4633 net_stats->rx_packets =
4634 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4635 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4636 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4638 net_stats->tx_packets =
4639 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4640 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4641 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4643 net_stats->rx_bytes =
4644 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4646 net_stats->tx_bytes =
4647 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4649 net_stats->multicast =
4650 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4652 net_stats->collisions =
4653 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4655 net_stats->rx_length_errors =
4656 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4657 stats_blk->stat_EtherStatsOverrsizePkts);
4659 net_stats->rx_over_errors =
4660 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4662 net_stats->rx_frame_errors =
4663 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4665 net_stats->rx_crc_errors =
4666 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4668 net_stats->rx_errors = net_stats->rx_length_errors +
4669 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4670 net_stats->rx_crc_errors;
4672 net_stats->tx_aborted_errors =
4673 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4674 stats_blk->stat_Dot3StatsLateCollisions);
4676 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4677 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4678 net_stats->tx_carrier_errors = 0;
4680 net_stats->tx_carrier_errors =
4682 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4685 net_stats->tx_errors =
4687 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4689 net_stats->tx_aborted_errors +
4690 net_stats->tx_carrier_errors;
4692 net_stats->rx_missed_errors =
4693 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4694 stats_blk->stat_FwRxDrop);
4699 /* All ethtool functions called with rtnl_lock */
4702 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4704 struct bnx2 *bp = netdev_priv(dev);
4706 cmd->supported = SUPPORTED_Autoneg;
4707 if (bp->phy_flags & PHY_SERDES_FLAG) {
4708 cmd->supported |= SUPPORTED_1000baseT_Full |
4711 cmd->port = PORT_FIBRE;
4714 cmd->supported |= SUPPORTED_10baseT_Half |
4715 SUPPORTED_10baseT_Full |
4716 SUPPORTED_100baseT_Half |
4717 SUPPORTED_100baseT_Full |
4718 SUPPORTED_1000baseT_Full |
4721 cmd->port = PORT_TP;
4724 cmd->advertising = bp->advertising;
4726 if (bp->autoneg & AUTONEG_SPEED) {
4727 cmd->autoneg = AUTONEG_ENABLE;
4730 cmd->autoneg = AUTONEG_DISABLE;
4733 if (netif_carrier_ok(dev)) {
4734 cmd->speed = bp->line_speed;
4735 cmd->duplex = bp->duplex;
4742 cmd->transceiver = XCVR_INTERNAL;
4743 cmd->phy_address = bp->phy_addr;
4749 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4751 struct bnx2 *bp = netdev_priv(dev);
4752 u8 autoneg = bp->autoneg;
4753 u8 req_duplex = bp->req_duplex;
4754 u16 req_line_speed = bp->req_line_speed;
4755 u32 advertising = bp->advertising;
4757 if (cmd->autoneg == AUTONEG_ENABLE) {
4758 autoneg |= AUTONEG_SPEED;
4760 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4762 /* allow advertising a single speed */
4763 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4764 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4765 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4766 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4768 if (bp->phy_flags & PHY_SERDES_FLAG)
4771 advertising = cmd->advertising;
4774 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4775 advertising = cmd->advertising;
4777 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4781 if (bp->phy_flags & PHY_SERDES_FLAG) {
4782 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4785 advertising = ETHTOOL_ALL_COPPER_SPEED;
4788 advertising |= ADVERTISED_Autoneg;
4791 if (bp->phy_flags & PHY_SERDES_FLAG) {
4792 if ((cmd->speed != SPEED_1000 &&
4793 cmd->speed != SPEED_2500) ||
4794 (cmd->duplex != DUPLEX_FULL))
4797 if (cmd->speed == SPEED_2500 &&
4798 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4801 else if (cmd->speed == SPEED_1000) {
4804 autoneg &= ~AUTONEG_SPEED;
4805 req_line_speed = cmd->speed;
4806 req_duplex = cmd->duplex;
4810 bp->autoneg = autoneg;
4811 bp->advertising = advertising;
4812 bp->req_line_speed = req_line_speed;
4813 bp->req_duplex = req_duplex;
4815 spin_lock_bh(&bp->phy_lock);
4819 spin_unlock_bh(&bp->phy_lock);
4825 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4827 struct bnx2 *bp = netdev_priv(dev);
4829 strcpy(info->driver, DRV_MODULE_NAME);
4830 strcpy(info->version, DRV_MODULE_VERSION);
4831 strcpy(info->bus_info, pci_name(bp->pdev));
4832 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4833 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4834 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4835 info->fw_version[1] = info->fw_version[3] = '.';
4836 info->fw_version[5] = 0;
4839 #define BNX2_REGDUMP_LEN (32 * 1024)
4842 bnx2_get_regs_len(struct net_device *dev)
4844 return BNX2_REGDUMP_LEN;
4848 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4850 u32 *p = _p, i, offset;
4852 struct bnx2 *bp = netdev_priv(dev);
4853 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4854 0x0800, 0x0880, 0x0c00, 0x0c10,
4855 0x0c30, 0x0d08, 0x1000, 0x101c,
4856 0x1040, 0x1048, 0x1080, 0x10a4,
4857 0x1400, 0x1490, 0x1498, 0x14f0,
4858 0x1500, 0x155c, 0x1580, 0x15dc,
4859 0x1600, 0x1658, 0x1680, 0x16d8,
4860 0x1800, 0x1820, 0x1840, 0x1854,
4861 0x1880, 0x1894, 0x1900, 0x1984,
4862 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4863 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4864 0x2000, 0x2030, 0x23c0, 0x2400,
4865 0x2800, 0x2820, 0x2830, 0x2850,
4866 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4867 0x3c00, 0x3c94, 0x4000, 0x4010,
4868 0x4080, 0x4090, 0x43c0, 0x4458,
4869 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4870 0x4fc0, 0x5010, 0x53c0, 0x5444,
4871 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4872 0x5fc0, 0x6000, 0x6400, 0x6428,
4873 0x6800, 0x6848, 0x684c, 0x6860,
4874 0x6888, 0x6910, 0x8000 };
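/* reg_boundaries holds [start, end) pairs of readable register ranges;
 * the loop below copies each range with REG_RD() and skips the holes,
 * leaving the gaps in the 32 KB snapshot zeroed by the memset that
 * follows.
 */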
4878 memset(p, 0, BNX2_REGDUMP_LEN);
4880 if (!netif_running(bp->dev))
4884 offset = reg_boundaries[0];
4886 while (offset < BNX2_REGDUMP_LEN) {
4887 *p++ = REG_RD(bp, offset);
4889 if (offset == reg_boundaries[i + 1]) {
4890 offset = reg_boundaries[i + 2];
4891 p = (u32 *) (orig_p + offset);
4898 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4900 struct bnx2 *bp = netdev_priv(dev);
4902 if (bp->flags & NO_WOL_FLAG) {
4907 wol->supported = WAKE_MAGIC;
4909 wol->wolopts = WAKE_MAGIC;
4913 memset(&wol->sopass, 0, sizeof(wol->sopass));
4917 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4919 struct bnx2 *bp = netdev_priv(dev);
4921 if (wol->wolopts & ~WAKE_MAGIC)
4924 if (wol->wolopts & WAKE_MAGIC) {
4925 if (bp->flags & NO_WOL_FLAG)
4937 bnx2_nway_reset(struct net_device *dev)
4939 struct bnx2 *bp = netdev_priv(dev);
4942 if (!(bp->autoneg & AUTONEG_SPEED)) {
4946 spin_lock_bh(&bp->phy_lock);
4949 /* Force a link-down event that is visible to the link partner */
4949 if (bp->phy_flags & PHY_SERDES_FLAG) {
4950 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4951 spin_unlock_bh(&bp->phy_lock);
4955 spin_lock_bh(&bp->phy_lock);
4957 bp->current_interval = SERDES_AN_TIMEOUT;
4958 bp->serdes_an_pending = 1;
4959 mod_timer(&bp->timer, jiffies + bp->current_interval);
4962 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4963 bmcr &= ~BMCR_LOOPBACK;
4964 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4966 spin_unlock_bh(&bp->phy_lock);
4972 bnx2_get_eeprom_len(struct net_device *dev)
4974 struct bnx2 *bp = netdev_priv(dev);
4976 if (bp->flash_info == NULL)
4979 return (int) bp->flash_size;
4983 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4986 struct bnx2 *bp = netdev_priv(dev);
4989 /* parameters already validated in ethtool_get_eeprom */
4991 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4997 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5000 struct bnx2 *bp = netdev_priv(dev);
5003 /* parameters already validated in ethtool_set_eeprom */
5005 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5011 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5013 struct bnx2 *bp = netdev_priv(dev);
5015 memset(coal, 0, sizeof(struct ethtool_coalesce));
5017 coal->rx_coalesce_usecs = bp->rx_ticks;
5018 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5019 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5020 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5022 coal->tx_coalesce_usecs = bp->tx_ticks;
5023 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5024 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5025 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5027 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5033 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5035 struct bnx2 *bp = netdev_priv(dev);
5037 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5038 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5040 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5041 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5043 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5044 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5046 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5047 if (bp->rx_quick_cons_trip_int > 0xff)
5048 bp->rx_quick_cons_trip_int = 0xff;
5050 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5051 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5053 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5054 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5056 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5057 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5059 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5060 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5063 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5064 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5065 bp->stats_ticks &= 0xffff00;
5067 if (netif_running(bp->dev)) {
5068 bnx2_netif_stop(bp);
5070 bnx2_netif_start(bp);
5077 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5079 struct bnx2 *bp = netdev_priv(dev);
5081 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5082 ering->rx_mini_max_pending = 0;
5083 ering->rx_jumbo_max_pending = 0;
5085 ering->rx_pending = bp->rx_ring_size;
5086 ering->rx_mini_pending = 0;
5087 ering->rx_jumbo_pending = 0;
5089 ering->tx_max_pending = MAX_TX_DESC_CNT;
5090 ering->tx_pending = bp->tx_ring_size;
5094 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5096 struct bnx2 *bp = netdev_priv(dev);
5098 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5099 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5100 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5104 if (netif_running(bp->dev)) {
5105 bnx2_netif_stop(bp);
5106 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5111 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5112 bp->tx_ring_size = ering->tx_pending;
5114 if (netif_running(bp->dev)) {
5117 rc = bnx2_alloc_mem(bp);
5121 bnx2_netif_start(bp);
5128 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5130 struct bnx2 *bp = netdev_priv(dev);
5132 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5133 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5134 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);
	bnx2_setup_phy(bp);
	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	if (data)
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
	return 0;
}
#define BNX2_NUM_STATS 46

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
#define STATS_OFFSET32(offset_name)	\
	(offsetof(struct statistics_block, offset_name) / 4)
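
/* STATS_OFFSET32() converts a byte offset within struct statistics_block
 * into an index into the u32 view of the stats block that
 * bnx2_get_ethtool_stats() walks below.
 */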
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
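
/* Each entry above gives the width in bytes of the corresponding
 * counter: 8 for a 64-bit (hi/lo word pair) counter, 4 for a 32-bit
 * counter, and 0 for a counter that must be skipped on that chip (see
 * the errata note above).
 */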
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr,
			sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr,
			sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
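
/* 64-bit counters are stored in the hardware stats block as a high
 * 32-bit word followed by a low 32-bit word; bnx2_get_ethtool_stats()
 * below reassembles each pair into a single u64.
 */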
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
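
/* Blink the port LED to identify the adapter.  'data' is the requested
 * duration; each loop iteration toggles the LED and sleeps 500 ms, so
 * data * 2 iterations blink for roughly 'data' seconds.
 */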
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
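
/* The ethtool_op_* entries below are generic helpers provided by the
 * ethtool core; the remaining handlers are implemented above.
 */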
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
#endif
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
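
/* netpoll/netconsole hook: service the device by faking an interrupt
 * with the IRQ line disabled, so it is safe to call in contexts where
 * normal interrupt delivery cannot be relied on.
 */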
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
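
	/* Prefer 64-bit DMA addressing (DAC) when the platform supports
	 * it, falling back to a 32-bit mask; only if neither can be set
	 * is the device unusable on this system.
	 */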
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems.
	 * The chip's target access swapping will not swap all accesses.
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);
	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
					 BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
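
	/* Note: pci_get_device() drops the reference on the device passed
	 * in and takes a reference on the one it returns, so the loop
	 * above only needs an explicit pci_dev_put() when it exits early
	 * via break.
	 */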
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;
err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in alloc_etherdev */
	dev = alloc_etherdev(sizeof(*bp));

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
#endif
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ",
		dev->name,
		bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz,
		dev->base_addr,
		bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");

	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
#ifdef BCM_TSO
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
#endif

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
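	/* Pick the reset code handed to the firmware based on whether
	 * Wake-on-LAN is possible at all (NO_WOL_FLAG) and whether it is
	 * currently enabled (bp->wol).
	 */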
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}

static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);