1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.45"
60 #define DRV_MODULE_RELDATE "September 29, 2006"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
121 static struct flash_spec flash_table[] =
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 /* Atmel Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205 "Buffered flash (256kB)"},
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
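/* Return the number of free TX buffer descriptors.  "diff" is the number
 * of descriptors currently in use (producer minus consumer, corrected for
 * ring wrap), so the free count is the ring size minus that.
 */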
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
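/* Indirect register access: write the target offset to the PCICFG window
 * address register, then read or write the data through the window register.
 */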
222 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
229 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
239 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240 REG_WR(bp, BNX2_CTX_DATA, val);
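/* MDIO (MII management) access to the PHY.  If the MAC is auto-polling the
 * PHY, auto-poll is temporarily turned off, a read or write command is
 * issued through the EMAC MDIO_COMM register, and the code polls for the
 * START_BUSY bit to clear before restoring auto-poll.
 */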
244 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
249 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
253 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
259 val1 = (bp->phy_addr << 21) | (reg << 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261 BNX2_EMAC_MDIO_COMM_START_BUSY;
262 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
264 for (i = 0; i < 50; i++) {
267 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
271 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
278 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
287 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
291 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
301 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
306 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
310 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
316 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
321 for (i = 0; i < 50; i++) {
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
331 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
336 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
340 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
350 bnx2_disable_int(struct bnx2 *bp)
352 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
358 bnx2_enable_int(struct bnx2 *bp)
360 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
367 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
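/* bp->intr_sem counts outstanding requests to keep interrupts disabled.
 * bnx2_disable_int_sync() bumps the count and waits for any in-flight
 * handler to finish; bnx2_netif_start() only re-enables the queue and
 * polling once the count drops back to zero.
 */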
371 bnx2_disable_int_sync(struct bnx2 *bp)
373 atomic_inc(&bp->intr_sem);
374 bnx2_disable_int(bp);
375 synchronize_irq(bp->pdev->irq);
379 bnx2_netif_stop(struct bnx2 *bp)
381 bnx2_disable_int_sync(bp);
382 if (netif_running(bp->dev)) {
383 netif_poll_disable(bp->dev);
384 netif_tx_disable(bp->dev);
385 bp->dev->trans_start = jiffies; /* prevent tx timeout */
390 bnx2_netif_start(struct bnx2 *bp)
392 if (atomic_dec_and_test(&bp->intr_sem)) {
393 if (netif_running(bp->dev)) {
394 netif_wake_queue(bp->dev);
395 netif_poll_enable(bp->dev);
402 bnx2_free_mem(struct bnx2 *bp)
406 if (bp->status_blk) {
407 pci_free_consistent(bp->pdev, bp->status_stats_size,
408 bp->status_blk, bp->status_blk_mapping);
409 bp->status_blk = NULL;
410 bp->stats_blk = NULL;
412 if (bp->tx_desc_ring) {
413 pci_free_consistent(bp->pdev,
414 sizeof(struct tx_bd) * TX_DESC_CNT,
415 bp->tx_desc_ring, bp->tx_desc_mapping);
416 bp->tx_desc_ring = NULL;
418 kfree(bp->tx_buf_ring);
419 bp->tx_buf_ring = NULL;
420 for (i = 0; i < bp->rx_max_ring; i++) {
421 if (bp->rx_desc_ring[i])
422 pci_free_consistent(bp->pdev,
423 sizeof(struct rx_bd) * RX_DESC_CNT,
425 bp->rx_desc_mapping[i]);
426 bp->rx_desc_ring[i] = NULL;
428 vfree(bp->rx_buf_ring);
429 bp->rx_buf_ring = NULL;
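/* Allocate the rings and the status block.  The TX and RX descriptor rings
 * are coherent DMA memory; the software shadow rings (sw_bd arrays) are
 * ordinary kernel memory.  The status block and statistics block share a
 * single DMA allocation, with the statistics block following the
 * cache-line-aligned status block.
 */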
433 bnx2_alloc_mem(struct bnx2 *bp)
435 int i, status_blk_size;
437 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
439 if (bp->tx_buf_ring == NULL)
442 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443 sizeof(struct tx_bd) *
445 &bp->tx_desc_mapping);
446 if (bp->tx_desc_ring == NULL)
449 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
451 if (bp->rx_buf_ring == NULL)
454 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
457 for (i = 0; i < bp->rx_max_ring; i++) {
458 bp->rx_desc_ring[i] =
459 pci_alloc_consistent(bp->pdev,
460 sizeof(struct rx_bd) * RX_DESC_CNT,
461 &bp->rx_desc_mapping[i]);
462 if (bp->rx_desc_ring[i] == NULL)
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469 bp->status_stats_size = status_blk_size +
470 sizeof(struct statistics_block);
472 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
473 &bp->status_blk_mapping);
474 if (bp->status_blk == NULL)
477 memset(bp->status_blk, 0, bp->status_stats_size);
479 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
482 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
492 bnx2_report_fw_link(struct bnx2 *bp)
494 u32 fw_link_status = 0;
499 switch (bp->line_speed) {
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
548 bnx2_report_link(struct bnx2 *bp)
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
554 printk("%d Mbps ", bp->line_speed);
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
559 printk("half duplex");
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
568 printk(", transmit ");
570 printk("flow control ON");
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
579 bnx2_report_fw_link(bp);
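/* Resolve the pause (flow control) settings.  If flow control is forced
 * rather than autonegotiated, the requested setting is used directly.
 * Otherwise the result is derived from the local and link-partner pause
 * advertisements (SerDes 1000X pause bits are first translated into the
 * copper-style PAUSE_CAP/PAUSE_ASYM form), following the resolution rules
 * of the 802.3 spec referenced below.
 */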
583 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
585 u32 local_adv, remote_adv;
588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
597 if (bp->duplex != DUPLEX_FULL) {
601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
653 bp->flow_ctrl = FLOW_CTRL_TX;
659 bnx2_5708s_linkup(struct bnx2 *bp)
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
682 bp->duplex = DUPLEX_HALF;
688 bnx2_5706s_linkup(struct bnx2 *bp)
690 u32 bmcr, local_adv, remote_adv, common;
693 bp->line_speed = SPEED_1000;
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
700 bp->duplex = DUPLEX_HALF;
703 if (!(bmcr & BMCR_ANENABLE)) {
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
717 bp->duplex = DUPLEX_HALF;
725 bnx2_copper_linkup(struct bnx2 *bp)
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
777 bp->line_speed = SPEED_10;
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
783 bp->duplex = DUPLEX_HALF;
791 bnx2_set_mac_link(struct bnx2 *bp)
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
809 switch (bp->line_speed) {
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
817 val |= BNX2_EMAC_MODE_PORT_MII;
820 val |= BNX2_EMAC_MODE_25G;
823 val |= BNX2_EMAC_MODE_PORT_GMII;
828 val |= BNX2_EMAC_MODE_PORT_GMII;
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
858 bnx2_set_link(struct bnx2 *bp)
863 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
868 link_up = bp->link_up;
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
881 bmsr &= ~BMSR_LSTATUS;
884 if (bmsr & BMSR_LSTATUS) {
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
894 bnx2_copper_linkup(bp);
896 bnx2_resolve_flow_ctrl(bp);
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
905 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
906 if (!(bmcr & BMCR_ANENABLE)) {
907 bnx2_write_phy(bp, MII_BMCR, bmcr |
911 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
915 if (bp->link_up != link_up) {
916 bnx2_report_link(bp);
919 bnx2_set_mac_link(bp);
925 bnx2_reset_phy(struct bnx2 *bp)
930 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
932 #define PHY_RESET_MAX_WAIT 100
933 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
936 bnx2_read_phy(bp, MII_BMCR, &reg);
937 if (!(reg & BMCR_RESET)) {
942 if (i == PHY_RESET_MAX_WAIT) {
949 bnx2_phy_get_pause_adv(struct bnx2 *bp)
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
960 adv = ADVERTISE_PAUSE_CAP;
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
968 adv = ADVERTISE_PAUSE_ASYM;
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
983 bnx2_setup_serdes_phy(struct bnx2 *bp)
988 if (!(bp->autoneg & AUTONEG_SPEED)) {
990 int force_link_down = 0;
992 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
993 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
995 bnx2_read_phy(bp, MII_BMCR, &bmcr);
996 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
997 new_bmcr |= BMCR_SPEED1000;
998 if (bp->req_line_speed == SPEED_2500) {
999 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1000 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1001 if (!(up1 & BCM5708S_UP1_2G5)) {
1002 up1 |= BCM5708S_UP1_2G5;
1003 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1004 force_link_down = 1;
1006 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1007 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1008 if (up1 & BCM5708S_UP1_2G5) {
1009 up1 &= ~BCM5708S_UP1_2G5;
1010 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1011 force_link_down = 1;
1015 if (bp->req_duplex == DUPLEX_FULL) {
1016 adv |= ADVERTISE_1000XFULL;
1017 new_bmcr |= BMCR_FULLDPLX;
1020 adv |= ADVERTISE_1000XHALF;
1021 new_bmcr &= ~BMCR_FULLDPLX;
1023 if ((new_bmcr != bmcr) || (force_link_down)) {
1024 /* Force a link down visible on the other side */
1026 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1027 ~(ADVERTISE_1000XFULL |
1028 ADVERTISE_1000XHALF));
1029 bnx2_write_phy(bp, MII_BMCR, bmcr |
1030 BMCR_ANRESTART | BMCR_ANENABLE);
1033 netif_carrier_off(bp->dev);
1034 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1035 bnx2_report_link(bp);
1037 bnx2_write_phy(bp, MII_ADVERTISE, adv);
1038 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1043 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1044 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1045 up1 |= BCM5708S_UP1_2G5;
1046 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1049 if (bp->advertising & ADVERTISED_1000baseT_Full)
1050 new_adv |= ADVERTISE_1000XFULL;
1052 new_adv |= bnx2_phy_get_pause_adv(bp);
1054 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1055 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1057 bp->serdes_an_pending = 0;
1058 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1059 /* Force a link down visible on the other side */
1061 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1062 spin_unlock_bh(&bp->phy_lock);
1064 spin_lock_bh(&bp->phy_lock);
1067 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1068 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1070 /* Speed up link-up time when the link partner
1071 * does not autonegotiate, which is very common
1072 * in blade servers. Some blade servers use
1073 * IPMI for keyboard input and it's important
1074 * to minimize link disruptions. Autoneg. involves
1075 * exchanging base pages plus 3 next pages and
1076 * normally completes in about 120 msec.
1078 bp->current_interval = SERDES_AN_TIMEOUT;
1079 bp->serdes_an_pending = 1;
1080 mod_timer(&bp->timer, jiffies + bp->current_interval);
1086 #define ETHTOOL_ALL_FIBRE_SPEED \
1087 (ADVERTISED_1000baseT_Full)
1089 #define ETHTOOL_ALL_COPPER_SPEED \
1090 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1091 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1092 ADVERTISED_1000baseT_Full)
1094 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1095 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1097 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1100 bnx2_setup_copper_phy(struct bnx2 *bp)
1105 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1107 if (bp->autoneg & AUTONEG_SPEED) {
1108 u32 adv_reg, adv1000_reg;
1109 u32 new_adv_reg = 0;
1110 u32 new_adv1000_reg = 0;
1112 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1113 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1114 ADVERTISE_PAUSE_ASYM);
1116 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1117 adv1000_reg &= PHY_ALL_1000_SPEED;
1119 if (bp->advertising & ADVERTISED_10baseT_Half)
1120 new_adv_reg |= ADVERTISE_10HALF;
1121 if (bp->advertising & ADVERTISED_10baseT_Full)
1122 new_adv_reg |= ADVERTISE_10FULL;
1123 if (bp->advertising & ADVERTISED_100baseT_Half)
1124 new_adv_reg |= ADVERTISE_100HALF;
1125 if (bp->advertising & ADVERTISED_100baseT_Full)
1126 new_adv_reg |= ADVERTISE_100FULL;
1127 if (bp->advertising & ADVERTISED_1000baseT_Full)
1128 new_adv1000_reg |= ADVERTISE_1000FULL;
1130 new_adv_reg |= ADVERTISE_CSMA;
1132 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1134 if ((adv1000_reg != new_adv1000_reg) ||
1135 (adv_reg != new_adv_reg) ||
1136 ((bmcr & BMCR_ANENABLE) == 0)) {
1138 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1139 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1140 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1143 else if (bp->link_up) {
1144 /* Flow ctrl may have changed from auto to forced */
1145 /* or vice-versa. */
1147 bnx2_resolve_flow_ctrl(bp);
1148 bnx2_set_mac_link(bp);
1154 if (bp->req_line_speed == SPEED_100) {
1155 new_bmcr |= BMCR_SPEED100;
1157 if (bp->req_duplex == DUPLEX_FULL) {
1158 new_bmcr |= BMCR_FULLDPLX;
1160 if (new_bmcr != bmcr) {
1163 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1164 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1166 if (bmsr & BMSR_LSTATUS) {
1167 /* Force link down */
1168 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1169 spin_unlock_bh(&bp->phy_lock);
1171 spin_lock_bh(&bp->phy_lock);
1173 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1174 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1177 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1179 /* Normally, the new speed is setup after the link has
1180 * gone down and up again. In some cases, link will not go
1181 * down so we need to set up the new speed here.
1183 if (bmsr & BMSR_LSTATUS) {
1184 bp->line_speed = bp->req_line_speed;
1185 bp->duplex = bp->req_duplex;
1186 bnx2_resolve_flow_ctrl(bp);
1187 bnx2_set_mac_link(bp);
1194 bnx2_setup_phy(struct bnx2 *bp)
1196 if (bp->loopback == MAC_LOOPBACK)
1199 if (bp->phy_flags & PHY_SERDES_FLAG) {
1200 return (bnx2_setup_serdes_phy(bp));
1203 return (bnx2_setup_copper_phy(bp));
1208 bnx2_init_5708s_phy(struct bnx2 *bp)
1212 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1213 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1214 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1216 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1217 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1218 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1220 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1221 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1222 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1224 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1225 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1226 val |= BCM5708S_UP1_2G5;
1227 bnx2_write_phy(bp, BCM5708S_UP1, val);
1230 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1231 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1232 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1233 /* increase tx signal amplitude */
1234 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1235 BCM5708S_BLK_ADDR_TX_MISC);
1236 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1237 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1238 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1239 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1242 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1243 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1248 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1249 BNX2_SHARED_HW_CFG_CONFIG);
1250 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1251 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1252 BCM5708S_BLK_ADDR_TX_MISC);
1253 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1254 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1255 BCM5708S_BLK_ADDR_DIG);
1262 bnx2_init_5706s_phy(struct bnx2 *bp)
1264 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1266 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1267 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1270 if (bp->dev->mtu > 1500) {
1273 /* Set extended packet length bit */
1274 bnx2_write_phy(bp, 0x18, 0x7);
1275 bnx2_read_phy(bp, 0x18, &val);
1276 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1278 bnx2_write_phy(bp, 0x1c, 0x6c00);
1279 bnx2_read_phy(bp, 0x1c, &val);
1280 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1285 bnx2_write_phy(bp, 0x18, 0x7);
1286 bnx2_read_phy(bp, 0x18, &val);
1287 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1289 bnx2_write_phy(bp, 0x1c, 0x6c00);
1290 bnx2_read_phy(bp, 0x1c, &val);
1291 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1298 bnx2_init_copper_phy(struct bnx2 *bp)
1302 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1304 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1305 bnx2_write_phy(bp, 0x18, 0x0c00);
1306 bnx2_write_phy(bp, 0x17, 0x000a);
1307 bnx2_write_phy(bp, 0x15, 0x310b);
1308 bnx2_write_phy(bp, 0x17, 0x201f);
1309 bnx2_write_phy(bp, 0x15, 0x9506);
1310 bnx2_write_phy(bp, 0x17, 0x401f);
1311 bnx2_write_phy(bp, 0x15, 0x14e2);
1312 bnx2_write_phy(bp, 0x18, 0x0400);
1315 if (bp->dev->mtu > 1500) {
1316 /* Set extended packet length bit */
1317 bnx2_write_phy(bp, 0x18, 0x7);
1318 bnx2_read_phy(bp, 0x18, &val);
1319 bnx2_write_phy(bp, 0x18, val | 0x4000);
1321 bnx2_read_phy(bp, 0x10, &val);
1322 bnx2_write_phy(bp, 0x10, val | 0x1);
1325 bnx2_write_phy(bp, 0x18, 0x7);
1326 bnx2_read_phy(bp, 0x18, &val);
1327 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1329 bnx2_read_phy(bp, 0x10, &val);
1330 bnx2_write_phy(bp, 0x10, val & ~0x1);
1333 /* ethernet@wirespeed */
1334 bnx2_write_phy(bp, 0x18, 0x7007);
1335 bnx2_read_phy(bp, 0x18, &val);
1336 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1342 bnx2_init_phy(struct bnx2 *bp)
1347 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1348 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1350 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1354 bnx2_read_phy(bp, MII_PHYSID1, &val);
1355 bp->phy_id = val << 16;
1356 bnx2_read_phy(bp, MII_PHYSID2, &val);
1357 bp->phy_id |= val & 0xffff;
1359 if (bp->phy_flags & PHY_SERDES_FLAG) {
1360 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1361 rc = bnx2_init_5706s_phy(bp);
1362 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1363 rc = bnx2_init_5708s_phy(bp);
1366 rc = bnx2_init_copper_phy(bp);
1375 bnx2_set_mac_loopback(struct bnx2 *bp)
1379 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1380 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1381 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1382 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1387 static int bnx2_test_link(struct bnx2 *);
1390 bnx2_set_phy_loopback(struct bnx2 *bp)
1395 spin_lock_bh(&bp->phy_lock);
1396 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1398 spin_unlock_bh(&bp->phy_lock);
1402 for (i = 0; i < 10; i++) {
1403 if (bnx2_test_link(bp) == 0)
1408 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1409 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1410 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1411 BNX2_EMAC_MODE_25G);
1413 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1414 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
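/* Driver/firmware handshake through shared memory: a sequence-tagged message
 * is written to the driver mailbox and the firmware mailbox is polled for a
 * matching acknowledgement.  On timeout, the firmware is informed of the
 * failure and an error is returned.
 */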
1420 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1426 msg_data |= bp->fw_wr_seq;
1428 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1430 /* wait for an acknowledgement. */
1431 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1434 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1436 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1439 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1442 /* If we timed out, inform the firmware that this is the case. */
1443 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1445 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1448 msg_data &= ~BNX2_DRV_MSG_CODE;
1449 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1451 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1456 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1463 bnx2_init_context(struct bnx2 *bp)
1469 u32 vcid_addr, pcid_addr, offset;
1473 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1476 vcid_addr = GET_PCID_ADDR(vcid);
1478 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1483 pcid_addr = GET_PCID_ADDR(new_vcid);
1486 vcid_addr = GET_CID_ADDR(vcid);
1487 pcid_addr = vcid_addr;
1490 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1491 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1493 /* Zero out the context. */
1494 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1495 CTX_WR(bp, 0x00, offset, 0);
1498 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1499 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
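/* Work around bad on-chip RX buffer memory (used on early chip revisions):
 * allocate every free mbuf cluster from the chip, remember the good ones,
 * and free only those back so the bad blocks are never handed out again.
 */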
1504 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1510 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1511 if (good_mbuf == NULL) {
1512 printk(KERN_ERR PFX "Failed to allocate memory in "
1513 "bnx2_alloc_bad_rbuf\n");
1517 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1518 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1522 /* Allocate a bunch of mbufs and save the good ones in an array. */
1523 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1524 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1525 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1527 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1529 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1531 /* The addresses with Bit 9 set are bad memory blocks. */
1532 if (!(val & (1 << 9))) {
1533 good_mbuf[good_mbuf_cnt] = (u16) val;
1537 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1540 /* Free the good ones back to the mbuf pool, thus discarding
1541 * all the bad ones. */
1542 while (good_mbuf_cnt) {
1545 val = good_mbuf[good_mbuf_cnt];
1546 val = (val << 9) | val | 1;
1548 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1555 bnx2_set_mac_addr(struct bnx2 *bp)
1558 u8 *mac_addr = bp->dev->dev_addr;
1560 val = (mac_addr[0] << 8) | mac_addr[1];
1562 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1564 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1565 (mac_addr[4] << 8) | mac_addr[5];
1567 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
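/* Allocate and DMA-map a receive skb for ring entry "index": align the data
 * pointer to an 8-byte boundary, map the buffer for device-to-host DMA, and
 * write its bus address into the corresponding rx_bd.
 */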
1571 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1573 struct sk_buff *skb;
1574 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1576 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1577 unsigned long align;
1579 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1584 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1585 skb_reserve(skb, 8 - align);
1588 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1589 PCI_DMA_FROMDEVICE);
1592 pci_unmap_addr_set(rx_buf, mapping, mapping);
1594 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1595 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1597 bp->rx_prod_bseq += bp->rx_buf_use_size;
1603 bnx2_phy_int(struct bnx2 *bp)
1605 u32 new_link_state, old_link_state;
1607 new_link_state = bp->status_blk->status_attn_bits &
1608 STATUS_ATTN_BITS_LINK_STATE;
1609 old_link_state = bp->status_blk->status_attn_bits_ack &
1610 STATUS_ATTN_BITS_LINK_STATE;
1611 if (new_link_state != old_link_state) {
1612 if (new_link_state) {
1613 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1614 STATUS_ATTN_BITS_LINK_STATE);
1617 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1618 STATUS_ATTN_BITS_LINK_STATE);
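/* TX completion: walk the TX ring from the driver's consumer index up to the
 * hardware consumer index reported in the status block, unmapping and
 * freeing each completed skb, then wake the queue if it was stopped and
 * enough descriptors have been freed.
 */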
1625 bnx2_tx_int(struct bnx2 *bp)
1627 struct status_block *sblk = bp->status_blk;
1628 u16 hw_cons, sw_cons, sw_ring_cons;
1631 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
1632 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1635 sw_cons = bp->tx_cons;
1637 while (sw_cons != hw_cons) {
1638 struct sw_bd *tx_buf;
1639 struct sk_buff *skb;
1642 sw_ring_cons = TX_RING_IDX(sw_cons);
1644 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1647 /* partial BD completions possible with TSO packets */
1648 if (skb_is_gso(skb)) {
1649 u16 last_idx, last_ring_idx;
1651 last_idx = sw_cons +
1652 skb_shinfo(skb)->nr_frags + 1;
1653 last_ring_idx = sw_ring_cons +
1654 skb_shinfo(skb)->nr_frags + 1;
1655 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1658 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1663 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1664 skb_headlen(skb), PCI_DMA_TODEVICE);
1667 last = skb_shinfo(skb)->nr_frags;
1669 for (i = 0; i < last; i++) {
1670 sw_cons = NEXT_TX_BD(sw_cons);
1672 pci_unmap_page(bp->pdev,
1674 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1676 skb_shinfo(skb)->frags[i].size,
1680 sw_cons = NEXT_TX_BD(sw_cons);
1682 tx_free_bd += last + 1;
1686 hw_cons = bp->hw_tx_cons =
1687 sblk->status_tx_quick_consumer_index0;
1689 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1694 bp->tx_cons = sw_cons;
1695 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1696 * before checking for netif_queue_stopped(). Without the
1697 * memory barrier, there is a small possibility that bnx2_start_xmit()
1698 * will miss it and cause the queue to be stopped forever.
1702 if (unlikely(netif_queue_stopped(bp->dev)) &&
1703 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1704 netif_tx_lock(bp->dev);
1705 if ((netif_queue_stopped(bp->dev)) &&
1706 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1707 netif_wake_queue(bp->dev);
1708 netif_tx_unlock(bp->dev);
1713 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1716 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1717 struct rx_bd *cons_bd, *prod_bd;
1719 cons_rx_buf = &bp->rx_buf_ring[cons];
1720 prod_rx_buf = &bp->rx_buf_ring[prod];
1722 pci_dma_sync_single_for_device(bp->pdev,
1723 pci_unmap_addr(cons_rx_buf, mapping),
1724 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1726 bp->rx_prod_bseq += bp->rx_buf_use_size;
1728 prod_rx_buf->skb = skb;
1733 pci_unmap_addr_set(prod_rx_buf, mapping,
1734 pci_unmap_addr(cons_rx_buf, mapping));
1736 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1737 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1738 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1739 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
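/* RX completion (NAPI): for each completed descriptor, small packets are
 * copied into a fresh skb so the original buffer can be recycled, larger
 * packets are passed up and replaced with a newly allocated buffer, and
 * errored frames are dropped with their buffers reused.  The producer index
 * and byte sequence are written back to the chip when the loop finishes.
 */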
1743 bnx2_rx_int(struct bnx2 *bp, int budget)
1745 struct status_block *sblk = bp->status_blk;
1746 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1747 struct l2_fhdr *rx_hdr;
1750 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
1751 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1754 sw_cons = bp->rx_cons;
1755 sw_prod = bp->rx_prod;
1757 /* Memory barrier necessary as speculative reads of the rx
1758 * buffer can be ahead of the index in the status block
1761 while (sw_cons != hw_cons) {
1764 struct sw_bd *rx_buf;
1765 struct sk_buff *skb;
1766 dma_addr_t dma_addr;
1768 sw_ring_cons = RX_RING_IDX(sw_cons);
1769 sw_ring_prod = RX_RING_IDX(sw_prod);
1771 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1776 dma_addr = pci_unmap_addr(rx_buf, mapping);
1778 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1779 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1781 rx_hdr = (struct l2_fhdr *) skb->data;
1782 len = rx_hdr->l2_fhdr_pkt_len - 4;
1784 if ((status = rx_hdr->l2_fhdr_status) &
1785 (L2_FHDR_ERRORS_BAD_CRC |
1786 L2_FHDR_ERRORS_PHY_DECODE |
1787 L2_FHDR_ERRORS_ALIGNMENT |
1788 L2_FHDR_ERRORS_TOO_SHORT |
1789 L2_FHDR_ERRORS_GIANT_FRAME)) {
1794 /* Since we don't have a jumbo ring, copy small packets
1797 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1798 struct sk_buff *new_skb;
1800 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1801 if (new_skb == NULL)
1805 memcpy(new_skb->data,
1806 skb->data + bp->rx_offset - 2,
1809 skb_reserve(new_skb, 2);
1810 skb_put(new_skb, len);
1812 bnx2_reuse_rx_skb(bp, skb,
1813 sw_ring_cons, sw_ring_prod);
1817 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1818 pci_unmap_single(bp->pdev, dma_addr,
1819 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1821 skb_reserve(skb, bp->rx_offset);
1826 bnx2_reuse_rx_skb(bp, skb,
1827 sw_ring_cons, sw_ring_prod);
1831 skb->protocol = eth_type_trans(skb, bp->dev);
1833 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1834 (ntohs(skb->protocol) != 0x8100)) {
1841 skb->ip_summed = CHECKSUM_NONE;
1843 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1844 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1846 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1847 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1848 skb->ip_summed = CHECKSUM_UNNECESSARY;
1852 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1853 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1854 rx_hdr->l2_fhdr_vlan_tag);
1858 netif_receive_skb(skb);
1860 bp->dev->last_rx = jiffies;
1864 sw_cons = NEXT_RX_BD(sw_cons);
1865 sw_prod = NEXT_RX_BD(sw_prod);
1867 if (rx_pkt == budget)
1870 /* Refresh hw_cons to see if there is new work */
1871 if (sw_cons == hw_cons) {
1872 hw_cons = bp->hw_rx_cons =
1873 sblk->status_rx_quick_consumer_index0;
1874 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1879 bp->rx_cons = sw_cons;
1880 bp->rx_prod = sw_prod;
1882 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1884 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1892 /* MSI ISR - The only difference between this and the INTx ISR
1893 * is that the MSI interrupt is always serviced.
1896 bnx2_msi(int irq, void *dev_instance)
1898 struct net_device *dev = dev_instance;
1899 struct bnx2 *bp = netdev_priv(dev);
1901 prefetch(bp->status_blk);
1902 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1903 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1904 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1906 /* Return here if interrupt is disabled. */
1907 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1910 netif_rx_schedule(dev);
1916 bnx2_interrupt(int irq, void *dev_instance)
1918 struct net_device *dev = dev_instance;
1919 struct bnx2 *bp = netdev_priv(dev);
1921 /* When using INTx, it is possible for the interrupt to arrive
1922 * at the CPU before the status block posted prior to the
1923 * interrupt. Reading a register will flush the status block.
1924 * When using MSI, the MSI message will always complete after
1925 * the status block write.
1927 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1928 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1929 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1932 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1933 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1934 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1936 /* Return here if interrupt is shared and is disabled. */
1937 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1940 netif_rx_schedule(dev);
1946 bnx2_has_work(struct bnx2 *bp)
1948 struct status_block *sblk = bp->status_blk;
1950 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1951 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1954 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1962 bnx2_poll(struct net_device *dev, int *budget)
1964 struct bnx2 *bp = netdev_priv(dev);
1966 if ((bp->status_blk->status_attn_bits &
1967 STATUS_ATTN_BITS_LINK_STATE) !=
1968 (bp->status_blk->status_attn_bits_ack &
1969 STATUS_ATTN_BITS_LINK_STATE)) {
1971 spin_lock(&bp->phy_lock);
1973 spin_unlock(&bp->phy_lock);
1975 /* This is needed to take care of transient status
1976 * during link changes.
1978 REG_WR(bp, BNX2_HC_COMMAND,
1979 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1980 REG_RD(bp, BNX2_HC_COMMAND);
1983 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
1986 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
1987 int orig_budget = *budget;
1990 if (orig_budget > dev->quota)
1991 orig_budget = dev->quota;
1993 work_done = bnx2_rx_int(bp, orig_budget);
1994 *budget -= work_done;
1995 dev->quota -= work_done;
1998 bp->last_status_idx = bp->status_blk->status_idx;
2001 if (!bnx2_has_work(bp)) {
2002 netif_rx_complete(dev);
2003 if (likely(bp->flags & USING_MSI_FLAG)) {
2004 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2005 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2006 bp->last_status_idx);
2009 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2010 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2011 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2012 bp->last_status_idx);
2014 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2015 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2016 bp->last_status_idx);
2023 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2024 * from set_multicast.
2027 bnx2_set_rx_mode(struct net_device *dev)
2029 struct bnx2 *bp = netdev_priv(dev);
2030 u32 rx_mode, sort_mode;
2033 spin_lock_bh(&bp->phy_lock);
2035 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2036 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2037 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2039 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2040 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2042 if (!(bp->flags & ASF_ENABLE_FLAG))
2043 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2045 if (dev->flags & IFF_PROMISC) {
2046 /* Promiscuous mode. */
2047 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2048 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2049 BNX2_RPM_SORT_USER0_PROM_VLAN;
2051 else if (dev->flags & IFF_ALLMULTI) {
2052 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2053 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2056 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2059 /* Accept one or more multicast(s). */
2060 struct dev_mc_list *mclist;
2061 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2066 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
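/* Build the multicast hash filter: a CRC of each address yields a bit
 * index, regidx selects one of the NUM_MC_HASH_REGISTERS 32-bit hash
 * registers, and the low bits select the position within that register.
 */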
2068 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2069 i++, mclist = mclist->next) {
2071 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2073 regidx = (bit & 0xe0) >> 5;
2075 mc_filter[regidx] |= (1 << bit);
2078 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2079 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2083 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2086 if (rx_mode != bp->rx_mode) {
2087 bp->rx_mode = rx_mode;
2088 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2091 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2092 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2093 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2095 spin_unlock_bh(&bp->phy_lock);
2098 #define FW_BUF_SIZE 0x8000
2101 bnx2_gunzip_init(struct bnx2 *bp)
2103 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2106 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2109 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2110 if (bp->strm->workspace == NULL)
2120 vfree(bp->gunzip_buf);
2121 bp->gunzip_buf = NULL;
2124 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2125 "uncompression.\n", bp->dev->name);
2130 bnx2_gunzip_end(struct bnx2 *bp)
2132 kfree(bp->strm->workspace);
2137 if (bp->gunzip_buf) {
2138 vfree(bp->gunzip_buf);
2139 bp->gunzip_buf = NULL;
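/* The on-chip CPU firmware images are stored gzip-compressed in the driver.
 * bnx2_gunzip() skips the gzip header, then inflates the image into
 * gunzip_buf using the kernel zlib in raw-inflate mode (-MAX_WBITS).
 */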
2144 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2148 /* check gzip header */
2149 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2155 if (zbuf[3] & FNAME)
2156 while ((zbuf[n++] != 0) && (n < len));
2158 bp->strm->next_in = zbuf + n;
2159 bp->strm->avail_in = len - n;
2160 bp->strm->next_out = bp->gunzip_buf;
2161 bp->strm->avail_out = FW_BUF_SIZE;
2163 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2167 rc = zlib_inflate(bp->strm, Z_FINISH);
2169 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2170 *outbuf = bp->gunzip_buf;
2172 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2173 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2174 bp->dev->name, bp->strm->msg);
2176 zlib_inflateEnd(bp->strm);
2178 if (rc == Z_STREAM_END)
2185 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2192 for (i = 0; i < rv2p_code_len; i += 8) {
2193 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2195 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2198 if (rv2p_proc == RV2P_PROC1) {
2199 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2200 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2203 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2204 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2208 /* Reset the processor, un-stall is done later. */
2209 if (rv2p_proc == RV2P_PROC1) {
2210 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2213 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
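/* Load firmware into one on-chip CPU: halt the CPU, copy the text
 * (decompressed from its gzip image), data, sbss, bss and read-only sections
 * into its scratchpad through indirect register writes, set the program
 * counter to the entry point, then clear the halt bit to start it running.
 */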
2218 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2225 val = REG_RD_IND(bp, cpu_reg->mode);
2226 val |= cpu_reg->mode_value_halt;
2227 REG_WR_IND(bp, cpu_reg->mode, val);
2228 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2230 /* Load the Text area. */
2231 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2236 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2246 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2247 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2251 /* Load the Data area. */
2252 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2256 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2257 REG_WR_IND(bp, offset, fw->data[j]);
2261 /* Load the SBSS area. */
2262 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2266 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2267 REG_WR_IND(bp, offset, fw->sbss[j]);
2271 /* Load the BSS area. */
2272 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2276 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2277 REG_WR_IND(bp, offset, fw->bss[j]);
2281 /* Load the Read-Only area. */
2282 offset = cpu_reg->spad_base +
2283 (fw->rodata_addr - cpu_reg->mips_view_base);
2287 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2288 REG_WR_IND(bp, offset, fw->rodata[j]);
2292 /* Clear the pre-fetch instruction. */
2293 REG_WR_IND(bp, cpu_reg->inst, 0);
2294 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2296 /* Start the CPU. */
2297 val = REG_RD_IND(bp, cpu_reg->mode);
2298 val &= ~cpu_reg->mode_value_halt;
2299 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2300 REG_WR_IND(bp, cpu_reg->mode, val);
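/* Initialize the chip's internal processors: the two RV2P engines receive
 * their microcode via load_rv2p_fw(), and the RX, TX, TX patch-up (TPAT) and
 * completion (COM) CPUs are each loaded with load_cpu_fw().
 */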
2306 bnx2_init_cpus(struct bnx2 *bp)
2308 struct cpu_reg cpu_reg;
2314 if ((rc = bnx2_gunzip_init(bp)) != 0)
2317 /* Initialize the RV2P processor. */
2318 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2323 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2325 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2330 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2332 /* Initialize the RX Processor. */
2333 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2334 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2335 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2336 cpu_reg.state = BNX2_RXP_CPU_STATE;
2337 cpu_reg.state_value_clear = 0xffffff;
2338 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2339 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2340 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2341 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2342 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2343 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2344 cpu_reg.mips_view_base = 0x8000000;
2346 fw = &bnx2_rxp_fw_06;
2348 rc = load_cpu_fw(bp, &cpu_reg, fw);
2352 /* Initialize the TX Processor. */
2353 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2354 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2355 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2356 cpu_reg.state = BNX2_TXP_CPU_STATE;
2357 cpu_reg.state_value_clear = 0xffffff;
2358 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2359 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2360 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2361 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2362 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2363 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2364 cpu_reg.mips_view_base = 0x8000000;
2366 fw = &bnx2_txp_fw_06;
2368 rc = load_cpu_fw(bp, &cpu_reg, fw);
2372 /* Initialize the TX Patch-up Processor. */
2373 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2374 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2375 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2376 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2377 cpu_reg.state_value_clear = 0xffffff;
2378 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2379 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2380 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2381 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2382 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2383 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2384 cpu_reg.mips_view_base = 0x8000000;
2386 fw = &bnx2_tpat_fw_06;
2388 rc = load_cpu_fw(bp, &cpu_reg, fw);
2392 /* Initialize the Completion Processor. */
2393 cpu_reg.mode = BNX2_COM_CPU_MODE;
2394 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2395 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2396 cpu_reg.state = BNX2_COM_CPU_STATE;
2397 cpu_reg.state_value_clear = 0xffffff;
2398 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2399 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2400 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2401 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2402 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2403 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2404 cpu_reg.mips_view_base = 0x8000000;
2406 fw = &bnx2_com_fw_06;
2408 rc = load_cpu_fw(bp, &cpu_reg, fw);
2413 bnx2_gunzip_end(bp);
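/* Power management.  When entering a low-power state with Wake-on-LAN
 * enabled, the PHY is renegotiated for 10/100 operation, magic-packet and
 * ACPI wake-up reception is enabled, all multicast frames are accepted, and
 * the firmware is told whether WoL is expected before PME is armed and the
 * PCI power state is changed.
 */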
2418 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2422 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2428 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2429 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2430 PCI_PM_CTRL_PME_STATUS);
2432 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2433 /* delay required during transition out of D3hot */
2436 val = REG_RD(bp, BNX2_EMAC_MODE);
2437 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2438 val &= ~BNX2_EMAC_MODE_MPKT;
2439 REG_WR(bp, BNX2_EMAC_MODE, val);
2441 val = REG_RD(bp, BNX2_RPM_CONFIG);
2442 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2443 REG_WR(bp, BNX2_RPM_CONFIG, val);
2454 autoneg = bp->autoneg;
2455 advertising = bp->advertising;
2457 bp->autoneg = AUTONEG_SPEED;
2458 bp->advertising = ADVERTISED_10baseT_Half |
2459 ADVERTISED_10baseT_Full |
2460 ADVERTISED_100baseT_Half |
2461 ADVERTISED_100baseT_Full |
2464 bnx2_setup_copper_phy(bp);
2466 bp->autoneg = autoneg;
2467 bp->advertising = advertising;
2469 bnx2_set_mac_addr(bp);
2471 val = REG_RD(bp, BNX2_EMAC_MODE);
2473 /* Enable port mode. */
2474 val &= ~BNX2_EMAC_MODE_PORT;
2475 val |= BNX2_EMAC_MODE_PORT_MII |
2476 BNX2_EMAC_MODE_MPKT_RCVD |
2477 BNX2_EMAC_MODE_ACPI_RCVD |
2478 BNX2_EMAC_MODE_MPKT;
2480 REG_WR(bp, BNX2_EMAC_MODE, val);
2482 /* receive all multicast */
2483 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2484 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2487 REG_WR(bp, BNX2_EMAC_RX_MODE,
2488 BNX2_EMAC_RX_MODE_SORT_MODE);
2490 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2491 BNX2_RPM_SORT_USER0_MC_EN;
2492 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2493 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2494 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2495 BNX2_RPM_SORT_USER0_ENA);
2497 /* Need to enable EMAC and RPM for WOL. */
2498 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2499 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2500 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2501 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2503 val = REG_RD(bp, BNX2_RPM_CONFIG);
2504 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2505 REG_WR(bp, BNX2_RPM_CONFIG, val);
2507 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2510 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2513 if (!(bp->flags & NO_WOL_FLAG))
2514 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2516 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2517 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2518 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2527 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2529 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2532 /* No more memory access after this point until
2533 * device is brought back to D0.
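/* NVRAM (flash) arbitration: request ownership of the flash interface
 * through the NVM_SW_ARB register and poll until the arbiter grants it (or
 * the request times out); bnx2_release_nvram_lock() reverses the process.
 */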
2545 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2550 /* Request access to the flash interface. */
2551 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2552 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2553 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2554 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2560 if (j >= NVRAM_TIMEOUT_COUNT)
2567 bnx2_release_nvram_lock(struct bnx2 *bp)
2572 /* Relinquish nvram interface. */
2573 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2575 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2576 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2577 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2583 if (j >= NVRAM_TIMEOUT_COUNT)
2591 bnx2_enable_nvram_write(struct bnx2 *bp)
2595 val = REG_RD(bp, BNX2_MISC_CFG);
2596 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
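/* Non-buffered flash parts also need an explicit WREN command issued to the device. */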
2598 if (!bp->flash_info->buffered) {
2601 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2602 REG_WR(bp, BNX2_NVM_COMMAND,
2603 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2605 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2608 val = REG_RD(bp, BNX2_NVM_COMMAND);
2609 if (val & BNX2_NVM_COMMAND_DONE)
2613 if (j >= NVRAM_TIMEOUT_COUNT)
2620 bnx2_disable_nvram_write(struct bnx2 *bp)
2624 val = REG_RD(bp, BNX2_MISC_CFG);
2625 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2630 bnx2_enable_nvram_access(struct bnx2 *bp)
2634 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2635 /* Enable both bits, even on read. */
2636 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2637 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2641 bnx2_disable_nvram_access(struct bnx2 *bp)
2645 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2646 /* Disable both bits, even after read. */
2647 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2648 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2649 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2653 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2658 if (bp->flash_info->buffered)
2659 /* Buffered flash, no erase needed */
2662 /* Build an erase command */
2663 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2664 BNX2_NVM_COMMAND_DOIT;
2666 /* Need to clear DONE bit separately. */
2667 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2669 /* Address of the NVRAM page to erase. */
2670 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2672 /* Issue an erase command. */
2673 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2675 /* Wait for completion. */
2676 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2681 val = REG_RD(bp, BNX2_NVM_COMMAND);
2682 if (val & BNX2_NVM_COMMAND_DONE)
2686 if (j >= NVRAM_TIMEOUT_COUNT)
2693 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2698 /* Build the command word. */
2699 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2701 /* Translate the offset into the buffered flash's page-based addressing. */
2702 if (bp->flash_info->buffered) {
2703 offset = ((offset / bp->flash_info->page_size) <<
2704 bp->flash_info->page_bits) +
2705 (offset % bp->flash_info->page_size);
2708 /* Need to clear DONE bit separately. */
2709 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2711 /* Address of the NVRAM to read from. */
2712 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2714 /* Issue a read command. */
2715 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2717 /* Wait for completion. */
2718 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2723 val = REG_RD(bp, BNX2_NVM_COMMAND);
2724 if (val & BNX2_NVM_COMMAND_DONE) {
2725 val = REG_RD(bp, BNX2_NVM_READ);
2727 val = be32_to_cpu(val);
2728 memcpy(ret_val, &val, 4);
2732 if (j >= NVRAM_TIMEOUT_COUNT)
2740 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2745 /* Build the command word. */
2746 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2748 /* Translate the offset into the buffered flash's page-based addressing. */
2749 if (bp->flash_info->buffered) {
2750 offset = ((offset / bp->flash_info->page_size) <<
2751 bp->flash_info->page_bits) +
2752 (offset % bp->flash_info->page_size);
2755 /* Need to clear DONE bit separately. */
2756 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2758 memcpy(&val32, val, 4);
2759 val32 = cpu_to_be32(val32);
2761 /* Write the data. */
2762 REG_WR(bp, BNX2_NVM_WRITE, val32);
2764 /* Address of the NVRAM to write to. */
2765 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2767 /* Issue the write command. */
2768 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2770 /* Wait for completion. */
2771 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2774 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2777 if (j >= NVRAM_TIMEOUT_COUNT)
2784 bnx2_init_nvram(struct bnx2 *bp)
2787 int j, entry_count, rc;
2788 struct flash_spec *flash;
2790 /* Determine the selected interface. */
2791 val = REG_RD(bp, BNX2_NVM_CFG1);
2793 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2796 if (val & 0x40000000) {
2798 /* Flash interface has been reconfigured */
2799 for (j = 0, flash = &flash_table[0]; j < entry_count;
2801 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2802 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2803 bp->flash_info = flash;
2810 /* Not yet reconfigured */
2812 if (val & (1 << 23))
2813 mask = FLASH_BACKUP_STRAP_MASK;
2815 mask = FLASH_STRAP_MASK;
2817 for (j = 0, flash = &flash_table[0]; j < entry_count;
2820 if ((val & mask) == (flash->strapping & mask)) {
2821 bp->flash_info = flash;
2823 /* Request access to the flash interface. */
2824 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2827 /* Enable access to flash interface */
2828 bnx2_enable_nvram_access(bp);
2830 /* Reconfigure the flash interface */
2831 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2832 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2833 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2834 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2836 /* Disable access to flash interface */
2837 bnx2_disable_nvram_access(bp);
2838 bnx2_release_nvram_lock(bp);
2843 } /* if (val & 0x40000000) */
2845 if (j == entry_count) {
2846 bp->flash_info = NULL;
2847 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2851 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2852 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2854 bp->flash_size = val;
2856 bp->flash_size = bp->flash_info->total_size;
2862 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2866 u32 cmd_flags, offset32, len32, extra;
2871 /* Request access to the flash interface. */
2872 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2875 /* Enable access to flash interface */
2876 bnx2_enable_nvram_access(bp);
2889 pre_len = 4 - (offset & 3);
2891 if (pre_len >= len32) {
2893 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2894 BNX2_NVM_COMMAND_LAST;
2897 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2900 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2905 memcpy(ret_buf, buf + (offset & 3), pre_len);
2912 extra = 4 - (len32 & 3);
2913 len32 = (len32 + 4) & ~3;
2920 cmd_flags = BNX2_NVM_COMMAND_LAST;
2922 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2923 BNX2_NVM_COMMAND_LAST;
2925 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2927 memcpy(ret_buf, buf, 4 - extra);
2929 else if (len32 > 0) {
2932 /* Read the first word. */
2936 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2938 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
2940 /* Advance to the next dword. */
2945 while (len32 > 4 && rc == 0) {
2946 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
2948 /* Advance to the next dword. */
2957 cmd_flags = BNX2_NVM_COMMAND_LAST;
2958 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2960 memcpy(ret_buf, buf, 4 - extra);
2963 /* Disable access to flash interface */
2964 bnx2_disable_nvram_access(bp);
2966 bnx2_release_nvram_lock(bp);
2972 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
2975 u32 written, offset32, len32;
2976 u8 *buf, start[4], end[4], *flash_buffer = NULL;
2978 int align_start, align_end;
2983 align_start = align_end = 0;
2985 if ((align_start = (offset32 & 3))) {
2987 len32 += align_start;
2988 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
2993 if ((len32 > 4) || !align_start) {
2994 align_end = 4 - (len32 & 3);
2996 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3003 if (align_start || align_end) {
3004 buf = kmalloc(len32, GFP_KERNEL);
3008 memcpy(buf, start, 4);
3011 memcpy(buf + len32 - 4, end, 4);
3013 memcpy(buf + align_start, data_buf, buf_size);
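/* Non-buffered flash is rewritten a page at a time, so keep a scratch buffer for the read-modify-write of each page. */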
3016 if (bp->flash_info->buffered == 0) {
3017 flash_buffer = kmalloc(264, GFP_KERNEL);
3018 if (flash_buffer == NULL) {
3020 goto nvram_write_end;
3025 while ((written < len32) && (rc == 0)) {
3026 u32 page_start, page_end, data_start, data_end;
3027 u32 addr, cmd_flags;
3030 /* Find the page_start addr */
3031 page_start = offset32 + written;
3032 page_start -= (page_start % bp->flash_info->page_size);
3033 /* Find the page_end addr */
3034 page_end = page_start + bp->flash_info->page_size;
3035 /* Find the data_start addr */
3036 data_start = (written == 0) ? offset32 : page_start;
3037 /* Find the data_end addr */
3038 data_end = (page_end > offset32 + len32) ?
3039 (offset32 + len32) : page_end;
3041 /* Request access to the flash interface. */
3042 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3043 goto nvram_write_end;
3045 /* Enable access to flash interface */
3046 bnx2_enable_nvram_access(bp);
3048 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3049 if (bp->flash_info->buffered == 0) {
3052 /* Read the whole page into the buffer
3053 * (non-buffered flash only) */
3054 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3055 if (j == (bp->flash_info->page_size - 4)) {
3056 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3058 rc = bnx2_nvram_read_dword(bp,
3064 goto nvram_write_end;
3070 /* Enable writes to flash interface (unlock write-protect) */
3071 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3072 goto nvram_write_end;
3074 /* Erase the page */
3075 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3076 goto nvram_write_end;
3078 /* Re-enable writes for the actual data write */
3079 bnx2_enable_nvram_write(bp);
3081 /* Loop to write back the buffer data from page_start to data_start */
3084 if (bp->flash_info->buffered == 0) {
3085 for (addr = page_start; addr < data_start;
3086 addr += 4, i += 4) {
3088 rc = bnx2_nvram_write_dword(bp, addr,
3089 &flash_buffer[i], cmd_flags);
3092 goto nvram_write_end;
3098 /* Loop to write the new data from data_start to data_end */
3099 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3100 if ((addr == page_end - 4) ||
3101 ((bp->flash_info->buffered) &&
3102 (addr == data_end - 4))) {
3104 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3106 rc = bnx2_nvram_write_dword(bp, addr, buf,
3110 goto nvram_write_end;
3116 /* Loop to write back the buffer data from data_end to page_end */
3118 if (bp->flash_info->buffered == 0) {
3119 for (addr = data_end; addr < page_end;
3120 addr += 4, i += 4) {
3122 if (addr == page_end-4) {
3123 cmd_flags = BNX2_NVM_COMMAND_LAST;
3125 rc = bnx2_nvram_write_dword(bp, addr,
3126 &flash_buffer[i], cmd_flags);
3129 goto nvram_write_end;
3135 /* Disable writes to flash interface (lock write-protect) */
3136 bnx2_disable_nvram_write(bp);
3138 /* Disable access to flash interface */
3139 bnx2_disable_nvram_access(bp);
3140 bnx2_release_nvram_lock(bp);
3142 /* Increment written */
3143 written += data_end - data_start;
3147 if (bp->flash_info->buffered == 0)
3148 kfree(flash_buffer);
3150 if (align_start || align_end)
3156 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3161 /* Wait for the current PCI transaction to complete before
3162 * issuing a reset. */
3163 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3164 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3165 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3166 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3167 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3168 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3171 /* Wait for the firmware to tell us it is ok to issue a reset. */
3172 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3174 /* Deposit a driver reset signature so the firmware knows that
3175 * this is a soft reset. */
3176 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3177 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3179 /* Do a dummy read to force the chip to complete all current transactions
3180 * before we issue a reset. */
3181 val = REG_RD(bp, BNX2_MISC_ID);
3183 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3184 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3185 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3188 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3190 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3191 (CHIP_ID(bp) == CHIP_ID_5706_A1))
3194 /* Reset takes approximately 30 usec */
3195 for (i = 0; i < 10; i++) {
3196 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3197 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3198 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
3204 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3205 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3206 printk(KERN_ERR PFX "Chip reset did not complete\n");
3210 /* Make sure byte swapping is properly configured. */
3211 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3212 if (val != 0x01020304) {
3213 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3217 /* Wait for the firmware to finish its initialization. */
3218 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3222 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3223 /* Adjust the voltage regulator two steps lower. The default
3224 * of this register is 0x0000000e. */
3225 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3227 /* Remove bad rbuf memory from the free pool. */
3228 rc = bnx2_alloc_bad_rbuf(bp);
3235 bnx2_init_chip(struct bnx2 *bp)
3240 /* Make sure the interrupt is not active. */
3241 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
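/* Configure DMA byte/word swapping and the number of read and write DMA channels. */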
3243 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3244 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3246 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3248 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3249 DMA_READ_CHANS << 12 |
3250 DMA_WRITE_CHANS << 16;
3252 val |= (0x2 << 20) | (1 << 11);
3254 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3257 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3258 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3259 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3261 REG_WR(bp, BNX2_DMA_CONFIG, val);
3263 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3264 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3265 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3266 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3269 if (bp->flags & PCIX_FLAG) {
3272 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3274 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3275 val16 & ~PCI_X_CMD_ERO);
3278 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3279 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3280 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3281 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3283 /* Initialize context mapping and zero out the quick contexts. The
3284 * context block must have already been enabled. */
3285 bnx2_init_context(bp);
3287 if ((rc = bnx2_init_cpus(bp)) != 0)
3290 bnx2_init_nvram(bp);
3292 bnx2_set_mac_addr(bp);
3294 val = REG_RD(bp, BNX2_MQ_CONFIG);
3295 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3296 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3297 REG_WR(bp, BNX2_MQ_CONFIG, val);
3299 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3300 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3301 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3303 val = (BCM_PAGE_BITS - 8) << 24;
3304 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3306 /* Configure page size. */
3307 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3308 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3309 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3310 REG_WR(bp, BNX2_TBDR_CONFIG, val);
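/* Seed the transmit backoff algorithm from the MAC address. */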
3312 val = bp->mac_addr[0] +
3313 (bp->mac_addr[1] << 8) +
3314 (bp->mac_addr[2] << 16) +
3316 (bp->mac_addr[4] << 8) +
3317 (bp->mac_addr[5] << 16);
3318 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3320 /* Program the MTU. Also include 4 bytes for CRC32. */
3321 val = bp->dev->mtu + ETH_HLEN + 4;
3322 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3323 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3324 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3326 bp->last_status_idx = 0;
3327 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3329 /* Set up how to generate a link change interrupt. */
3330 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
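/* Give the host coalescing block the DMA addresses of the status and statistics blocks. */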
3332 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3333 (u64) bp->status_blk_mapping & 0xffffffff);
3334 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3336 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3337 (u64) bp->stats_blk_mapping & 0xffffffff);
3338 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3339 (u64) bp->stats_blk_mapping >> 32);
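/* Program the interrupt coalescing trip points and timer ticks. */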
3341 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3342 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3344 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3345 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3347 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3348 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3350 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3352 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3354 REG_WR(bp, BNX2_HC_COM_TICKS,
3355 (bp->com_ticks_int << 16) | bp->com_ticks);
3357 REG_WR(bp, BNX2_HC_CMD_TICKS,
3358 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3360 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3361 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3363 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3364 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3366 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3367 BNX2_HC_CONFIG_TX_TMR_MODE |
3368 BNX2_HC_CONFIG_COLLECT_STATS);
3371 /* Clear internal stats counters. */
3372 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3374 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3376 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3377 BNX2_PORT_FEATURE_ASF_ENABLED)
3378 bp->flags |= ASF_ENABLE_FLAG;
3380 /* Initialize the receive filter. */
3381 bnx2_set_rx_mode(bp->dev);
3383 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3386 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3387 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3391 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
3398 bnx2_init_tx_ring(struct bnx2 *bp)
3403 bp->tx_wake_thresh = bp->tx_ring_size / 2;
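/* The last BD in the ring is a chain BD that points back to the start of the ring. */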
3405 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3407 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3408 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3413 bp->tx_prod_bseq = 0;
3415 val = BNX2_L2CTX_TYPE_TYPE_L2;
3416 val |= BNX2_L2CTX_TYPE_SIZE_L2;
3417 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);
3419 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
3421 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);
3423 val = (u64) bp->tx_desc_mapping >> 32;
3424 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);
3426 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3427 CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
3431 bnx2_init_rx_ring(struct bnx2 *bp)
3435 u16 prod, ring_prod;
3438 /* 8 for CRC and VLAN */
3439 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3440 /* 8 for alignment */
3441 bp->rx_buf_size = bp->rx_buf_use_size + 8;
3443 ring_prod = prod = bp->rx_prod = 0;
3446 bp->rx_prod_bseq = 0;
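/* Initialize the RX BD pages and chain them together into one logical ring. */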
3448 for (i = 0; i < bp->rx_max_ring; i++) {
3451 rxbd = &bp->rx_desc_ring[i][0];
3452 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3453 rxbd->rx_bd_len = bp->rx_buf_use_size;
3454 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3456 if (i == (bp->rx_max_ring - 1))
3460 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3461 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3465 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3466 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3468 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3470 val = (u64) bp->rx_desc_mapping[0] >> 32;
3471 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3473 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3474 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3476 for (i = 0; i < bp->rx_ring_size; i++) {
3477 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3480 prod = NEXT_RX_BD(prod);
3481 ring_prod = RX_RING_IDX(prod);
3485 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3487 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3491 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3495 bp->rx_ring_size = size;
3497 while (size > MAX_RX_DESC_CNT) {
3498 size -= MAX_RX_DESC_CNT;
3501 /* round to next power of 2 */
3503 while ((max & num_rings) == 0)
3506 if (num_rings != max)
3509 bp->rx_max_ring = max;
3510 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3514 bnx2_free_tx_skbs(struct bnx2 *bp)
3518 if (bp->tx_buf_ring == NULL)
3521 for (i = 0; i < TX_DESC_CNT; ) {
3522 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3523 struct sk_buff *skb = tx_buf->skb;
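/* Unmap the linear data and every fragment of this skb before freeing it. */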
3531 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3532 skb_headlen(skb), PCI_DMA_TODEVICE);
3536 last = skb_shinfo(skb)->nr_frags;
3537 for (j = 0; j < last; j++) {
3538 tx_buf = &bp->tx_buf_ring[i + j + 1];
3539 pci_unmap_page(bp->pdev,
3540 pci_unmap_addr(tx_buf, mapping),
3541 skb_shinfo(skb)->frags[j].size,
3551 bnx2_free_rx_skbs(struct bnx2 *bp)
3555 if (bp->rx_buf_ring == NULL)
3558 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3559 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3560 struct sk_buff *skb = rx_buf->skb;
3565 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3566 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3575 bnx2_free_skbs(struct bnx2 *bp)
3577 bnx2_free_tx_skbs(bp);
3578 bnx2_free_rx_skbs(bp);
3582 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3586 rc = bnx2_reset_chip(bp, reset_code);
3591 if ((rc = bnx2_init_chip(bp)) != 0)
3594 bnx2_init_tx_ring(bp);
3595 bnx2_init_rx_ring(bp);
3600 bnx2_init_nic(struct bnx2 *bp)
3604 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3607 spin_lock_bh(&bp->phy_lock);
3609 spin_unlock_bh(&bp->phy_lock);
3615 bnx2_test_registers(struct bnx2 *bp)
3619 static const struct {
3625 { 0x006c, 0, 0x00000000, 0x0000003f },
3626 { 0x0090, 0, 0xffffffff, 0x00000000 },
3627 { 0x0094, 0, 0x00000000, 0x00000000 },
3629 { 0x0404, 0, 0x00003f00, 0x00000000 },
3630 { 0x0418, 0, 0x00000000, 0xffffffff },
3631 { 0x041c, 0, 0x00000000, 0xffffffff },
3632 { 0x0420, 0, 0x00000000, 0x80ffffff },
3633 { 0x0424, 0, 0x00000000, 0x00000000 },
3634 { 0x0428, 0, 0x00000000, 0x00000001 },
3635 { 0x0450, 0, 0x00000000, 0x0000ffff },
3636 { 0x0454, 0, 0x00000000, 0xffffffff },
3637 { 0x0458, 0, 0x00000000, 0xffffffff },
3639 { 0x0808, 0, 0x00000000, 0xffffffff },
3640 { 0x0854, 0, 0x00000000, 0xffffffff },
3641 { 0x0868, 0, 0x00000000, 0x77777777 },
3642 { 0x086c, 0, 0x00000000, 0x77777777 },
3643 { 0x0870, 0, 0x00000000, 0x77777777 },
3644 { 0x0874, 0, 0x00000000, 0x77777777 },
3646 { 0x0c00, 0, 0x00000000, 0x00000001 },
3647 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3648 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3650 { 0x1000, 0, 0x00000000, 0x00000001 },
3651 { 0x1004, 0, 0x00000000, 0x000f0001 },
3653 { 0x1408, 0, 0x01c00800, 0x00000000 },
3654 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3655 { 0x14a8, 0, 0x00000000, 0x000001ff },
3656 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3657 { 0x14b0, 0, 0x00000002, 0x00000001 },
3658 { 0x14b8, 0, 0x00000000, 0x00000000 },
3659 { 0x14c0, 0, 0x00000000, 0x00000009 },
3660 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3661 { 0x14cc, 0, 0x00000000, 0x00000001 },
3662 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3664 { 0x1800, 0, 0x00000000, 0x00000001 },
3665 { 0x1804, 0, 0x00000000, 0x00000003 },
3667 { 0x2800, 0, 0x00000000, 0x00000001 },
3668 { 0x2804, 0, 0x00000000, 0x00003f01 },
3669 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3670 { 0x2810, 0, 0xffff0000, 0x00000000 },
3671 { 0x2814, 0, 0xffff0000, 0x00000000 },
3672 { 0x2818, 0, 0xffff0000, 0x00000000 },
3673 { 0x281c, 0, 0xffff0000, 0x00000000 },
3674 { 0x2834, 0, 0xffffffff, 0x00000000 },
3675 { 0x2840, 0, 0x00000000, 0xffffffff },
3676 { 0x2844, 0, 0x00000000, 0xffffffff },
3677 { 0x2848, 0, 0xffffffff, 0x00000000 },
3678 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3680 { 0x2c00, 0, 0x00000000, 0x00000011 },
3681 { 0x2c04, 0, 0x00000000, 0x00030007 },
3683 { 0x3c00, 0, 0x00000000, 0x00000001 },
3684 { 0x3c04, 0, 0x00000000, 0x00070000 },
3685 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3686 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3687 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3688 { 0x3c14, 0, 0x00000000, 0xffffffff },
3689 { 0x3c18, 0, 0x00000000, 0xffffffff },
3690 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3691 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3693 { 0x5004, 0, 0x00000000, 0x0000007f },
3694 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3695 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3697 { 0x5c00, 0, 0x00000000, 0x00000001 },
3698 { 0x5c04, 0, 0x00000000, 0x0003000f },
3699 { 0x5c08, 0, 0x00000003, 0x00000000 },
3700 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3701 { 0x5c10, 0, 0x00000000, 0xffffffff },
3702 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3703 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3704 { 0x5c88, 0, 0x00000000, 0x00077373 },
3705 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3707 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3708 { 0x680c, 0, 0xffffffff, 0x00000000 },
3709 { 0x6810, 0, 0xffffffff, 0x00000000 },
3710 { 0x6814, 0, 0xffffffff, 0x00000000 },
3711 { 0x6818, 0, 0xffffffff, 0x00000000 },
3712 { 0x681c, 0, 0xffffffff, 0x00000000 },
3713 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3714 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3715 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3716 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3717 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3718 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3719 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3720 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3721 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3722 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3723 { 0x684c, 0, 0xffffffff, 0x00000000 },
3724 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3725 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3726 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3727 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3728 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3729 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
3731 { 0xffff, 0, 0x00000000, 0x00000000 },
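/* For each register: write 0 and all-ones, check that the read/write bits respond and the read-only bits keep their saved value, then restore the original contents. */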
3735 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3736 u32 offset, rw_mask, ro_mask, save_val, val;
3738 offset = (u32) reg_tbl[i].offset;
3739 rw_mask = reg_tbl[i].rw_mask;
3740 ro_mask = reg_tbl[i].ro_mask;
3742 save_val = readl(bp->regview + offset);
3744 writel(0, bp->regview + offset);
3746 val = readl(bp->regview + offset);
3747 if ((val & rw_mask) != 0) {
3751 if ((val & ro_mask) != (save_val & ro_mask)) {
3755 writel(0xffffffff, bp->regview + offset);
3757 val = readl(bp->regview + offset);
3758 if ((val & rw_mask) != rw_mask) {
3762 if ((val & ro_mask) != (save_val & ro_mask)) {
3766 writel(save_val, bp->regview + offset);
3770 writel(save_val, bp->regview + offset);
3778 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3780 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3781 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
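/* Write each pattern across the region through indirect access and verify it reads back intact. */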
3784 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3787 for (offset = 0; offset < size; offset += 4) {
3789 REG_WR_IND(bp, start + offset, test_pattern[i]);
3791 if (REG_RD_IND(bp, start + offset) !=
3801 bnx2_test_memory(struct bnx2 *bp)
3805 static const struct {
3809 { 0x60000, 0x4000 },
3810 { 0xa0000, 0x3000 },
3811 { 0xe0000, 0x4000 },
3812 { 0x120000, 0x4000 },
3813 { 0x1a0000, 0x4000 },
3814 { 0x160000, 0x4000 },
3818 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3819 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3820 mem_tbl[i].len)) != 0) {
3828 #define BNX2_MAC_LOOPBACK 0
3829 #define BNX2_PHY_LOOPBACK 1
3832 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3834 unsigned int pkt_size, num_pkts, i;
3835 struct sk_buff *skb, *rx_skb;
3836 unsigned char *packet;
3837 u16 rx_start_idx, rx_idx;
3840 struct sw_bd *rx_buf;
3841 struct l2_fhdr *rx_hdr;
3844 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3845 bp->loopback = MAC_LOOPBACK;
3846 bnx2_set_mac_loopback(bp);
3848 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3849 bp->loopback = PHY_LOOPBACK;
3850 bnx2_set_phy_loopback(bp);
3856 skb = netdev_alloc_skb(bp->dev, pkt_size);
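/* Build a test frame addressed to our own MAC with an incrementing byte payload. */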
3859 packet = skb_put(skb, pkt_size);
3860 memcpy(packet, bp->mac_addr, 6);
3861 memset(packet + 6, 0x0, 8);
3862 for (i = 14; i < pkt_size; i++)
3863 packet[i] = (unsigned char) (i & 0xff);
3865 map = pci_map_single(bp->pdev, skb->data, pkt_size,
3868 REG_WR(bp, BNX2_HC_COMMAND,
3869 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3871 REG_RD(bp, BNX2_HC_COMMAND);
3874 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
3878 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3880 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3881 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3882 txbd->tx_bd_mss_nbytes = pkt_size;
3883 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3886 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3887 bp->tx_prod_bseq += pkt_size;
3889 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
3890 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
3894 REG_WR(bp, BNX2_HC_COMMAND,
3895 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3897 REG_RD(bp, BNX2_HC_COMMAND);
3901 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
3904 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
3905 goto loopback_test_done;
3908 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
3909 if (rx_idx != rx_start_idx + num_pkts) {
3910 goto loopback_test_done;
3913 rx_buf = &bp->rx_buf_ring[rx_start_idx];
3914 rx_skb = rx_buf->skb;
3916 rx_hdr = (struct l2_fhdr *) rx_skb->data;
3917 skb_reserve(rx_skb, bp->rx_offset);
3919 pci_dma_sync_single_for_cpu(bp->pdev,
3920 pci_unmap_addr(rx_buf, mapping),
3921 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
3923 if (rx_hdr->l2_fhdr_status &
3924 (L2_FHDR_ERRORS_BAD_CRC |
3925 L2_FHDR_ERRORS_PHY_DECODE |
3926 L2_FHDR_ERRORS_ALIGNMENT |
3927 L2_FHDR_ERRORS_TOO_SHORT |
3928 L2_FHDR_ERRORS_GIANT_FRAME)) {
3930 goto loopback_test_done;
3933 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
3934 goto loopback_test_done;
3937 for (i = 14; i < pkt_size; i++) {
3938 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
3939 goto loopback_test_done;
3950 #define BNX2_MAC_LOOPBACK_FAILED 1
3951 #define BNX2_PHY_LOOPBACK_FAILED 2
3952 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
3953 BNX2_PHY_LOOPBACK_FAILED)
3956 bnx2_test_loopback(struct bnx2 *bp)
3960 if (!netif_running(bp->dev))
3961 return BNX2_LOOPBACK_FAILED;
3963 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
3964 spin_lock_bh(&bp->phy_lock);
3966 spin_unlock_bh(&bp->phy_lock);
3967 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
3968 rc |= BNX2_MAC_LOOPBACK_FAILED;
3969 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
3970 rc |= BNX2_PHY_LOOPBACK_FAILED;
3974 #define NVRAM_SIZE 0x200
3975 #define CRC32_RESIDUAL 0xdebb20e3
3978 bnx2_test_nvram(struct bnx2 *bp)
3980 u32 buf[NVRAM_SIZE / 4];
3981 u8 *data = (u8 *) buf;
3985 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
3986 goto test_nvram_done;
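/* The first NVRAM dword must contain the 0x669955aa signature. */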
3988 magic = be32_to_cpu(buf[0]);
3989 if (magic != 0x669955aa) {
3991 goto test_nvram_done;
3994 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
3995 goto test_nvram_done;
3997 csum = ether_crc_le(0x100, data);
3998 if (csum != CRC32_RESIDUAL) {
4000 goto test_nvram_done;
4003 csum = ether_crc_le(0x100, data + 0x100);
4004 if (csum != CRC32_RESIDUAL) {
4013 bnx2_test_link(struct bnx2 *bp)
4017 spin_lock_bh(&bp->phy_lock);
4018 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4019 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4020 spin_unlock_bh(&bp->phy_lock);
4022 if (bmsr & BMSR_LSTATUS) {
4029 bnx2_test_intr(struct bnx2 *bp)
4034 if (!netif_running(bp->dev))
4037 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4039 /* This register is not touched during run-time. */
4040 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4041 REG_RD(bp, BNX2_HC_COMMAND);
4043 for (i = 0; i < 10; i++) {
4044 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4050 msleep_interruptible(10);
4059 bnx2_5706_serdes_timer(struct bnx2 *bp)
4061 spin_lock(&bp->phy_lock);
4062 if (bp->serdes_an_pending)
4063 bp->serdes_an_pending--;
4064 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4067 bp->current_interval = bp->timer_interval;
4069 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4071 if (bmcr & BMCR_ANENABLE) {
4074 bnx2_write_phy(bp, 0x1c, 0x7c00);
4075 bnx2_read_phy(bp, 0x1c, &phy1);
4077 bnx2_write_phy(bp, 0x17, 0x0f01);
4078 bnx2_read_phy(bp, 0x15, &phy2);
4079 bnx2_write_phy(bp, 0x17, 0x0f01);
4080 bnx2_read_phy(bp, 0x15, &phy2);
4082 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4083 !(phy2 & 0x20)) { /* no CONFIG */
4085 bmcr &= ~BMCR_ANENABLE;
4086 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4087 bnx2_write_phy(bp, MII_BMCR, bmcr);
4088 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4092 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4093 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4096 bnx2_write_phy(bp, 0x17, 0x0f01);
4097 bnx2_read_phy(bp, 0x15, &phy2);
4101 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4102 bmcr |= BMCR_ANENABLE;
4103 bnx2_write_phy(bp, MII_BMCR, bmcr);
4105 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4108 bp->current_interval = bp->timer_interval;
4110 spin_unlock(&bp->phy_lock);
4114 bnx2_5708_serdes_timer(struct bnx2 *bp)
4116 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4117 bp->serdes_an_pending = 0;
4121 spin_lock(&bp->phy_lock);
4122 if (bp->serdes_an_pending)
4123 bp->serdes_an_pending--;
4124 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4127 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4129 if (bmcr & BMCR_ANENABLE) {
4130 bmcr &= ~BMCR_ANENABLE;
4131 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4132 bnx2_write_phy(bp, MII_BMCR, bmcr);
4133 bp->current_interval = SERDES_FORCED_TIMEOUT;
4135 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4136 bmcr |= BMCR_ANENABLE;
4137 bnx2_write_phy(bp, MII_BMCR, bmcr);
4138 bp->serdes_an_pending = 2;
4139 bp->current_interval = bp->timer_interval;
4143 bp->current_interval = bp->timer_interval;
4145 spin_unlock(&bp->phy_lock);
4149 bnx2_timer(unsigned long data)
4151 struct bnx2 *bp = (struct bnx2 *) data;
4154 if (!netif_running(bp->dev))
4157 if (atomic_read(&bp->intr_sem) != 0)
4158 goto bnx2_restart_timer;
4160 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4161 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4163 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4165 if (bp->phy_flags & PHY_SERDES_FLAG) {
4166 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4167 bnx2_5706_serdes_timer(bp);
4168 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4169 bnx2_5708_serdes_timer(bp);
4173 mod_timer(&bp->timer, jiffies + bp->current_interval);
4176 /* Called with rtnl_lock */
4178 bnx2_open(struct net_device *dev)
4180 struct bnx2 *bp = netdev_priv(dev);
4183 bnx2_set_power_state(bp, PCI_D0);
4184 bnx2_disable_int(bp);
4186 rc = bnx2_alloc_mem(bp);
4190 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4191 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4194 if (pci_enable_msi(bp->pdev) == 0) {
4195 bp->flags |= USING_MSI_FLAG;
4196 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4200 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4201 IRQF_SHARED, dev->name, dev);
4205 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4213 rc = bnx2_init_nic(bp);
4216 free_irq(bp->pdev->irq, dev);
4217 if (bp->flags & USING_MSI_FLAG) {
4218 pci_disable_msi(bp->pdev);
4219 bp->flags &= ~USING_MSI_FLAG;
4226 mod_timer(&bp->timer, jiffies + bp->current_interval);
4228 atomic_set(&bp->intr_sem, 0);
4230 bnx2_enable_int(bp);
4232 if (bp->flags & USING_MSI_FLAG) {
4233 /* Test MSI to make sure it is working.
4234 * If the MSI test fails, go back to INTx mode
4236 if (bnx2_test_intr(bp) != 0) {
4237 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4238 " using MSI, switching to INTx mode. Please"
4239 " report this failure to the PCI maintainer"
4240 " and include system chipset information.\n",
4243 bnx2_disable_int(bp);
4244 free_irq(bp->pdev->irq, dev);
4245 pci_disable_msi(bp->pdev);
4246 bp->flags &= ~USING_MSI_FLAG;
4248 rc = bnx2_init_nic(bp);
4251 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4252 IRQF_SHARED, dev->name, dev);
4257 del_timer_sync(&bp->timer);
4260 bnx2_enable_int(bp);
4263 if (bp->flags & USING_MSI_FLAG) {
4264 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4267 netif_start_queue(dev);
4273 bnx2_reset_task(void *data)
4275 struct bnx2 *bp = data;
4277 if (!netif_running(bp->dev))
4280 bp->in_reset_task = 1;
4281 bnx2_netif_stop(bp);
4285 atomic_set(&bp->intr_sem, 1);
4286 bnx2_netif_start(bp);
4287 bp->in_reset_task = 0;
4291 bnx2_tx_timeout(struct net_device *dev)
4293 struct bnx2 *bp = netdev_priv(dev);
4295 /* This allows the netif to be shut down gracefully before resetting */
4296 schedule_work(&bp->reset_task);
4300 /* Called with rtnl_lock */
4302 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4304 struct bnx2 *bp = netdev_priv(dev);
4306 bnx2_netif_stop(bp);
4309 bnx2_set_rx_mode(dev);
4311 bnx2_netif_start(bp);
4314 /* Called with rtnl_lock */
4316 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4318 struct bnx2 *bp = netdev_priv(dev);
4320 bnx2_netif_stop(bp);
4323 bp->vlgrp->vlan_devices[vid] = NULL;
4324 bnx2_set_rx_mode(dev);
4326 bnx2_netif_start(bp);
4330 /* Called with netif_tx_lock.
4331 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4332 * netif_wake_queue().
4335 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4337 struct bnx2 *bp = netdev_priv(dev);
4340 struct sw_bd *tx_buf;
4341 u32 len, vlan_tag_flags, last_frag, mss;
4342 u16 prod, ring_prod;
4345 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4346 netif_stop_queue(dev);
4347 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4350 return NETDEV_TX_BUSY;
4352 len = skb_headlen(skb);
4354 ring_prod = TX_RING_IDX(prod);
4357 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4358 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4361 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4363 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
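/* TSO: give the hardware the MSS and the IP/TCP header layout so it can segment the frame. */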
4366 if ((mss = skb_shinfo(skb)->gso_size) &&
4367 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4368 u32 tcp_opt_len, ip_tcp_len;
4370 if (skb_header_cloned(skb) &&
4371 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4373 return NETDEV_TX_OK;
4376 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4377 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4380 if (skb->h.th->doff > 5) {
4381 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4383 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4385 skb->nh.iph->check = 0;
4386 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4388 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4392 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4393 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4394 (tcp_opt_len >> 2)) << 8;
4403 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4405 tx_buf = &bp->tx_buf_ring[ring_prod];
4407 pci_unmap_addr_set(tx_buf, mapping, mapping);
4409 txbd = &bp->tx_desc_ring[ring_prod];
4411 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4412 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4413 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4414 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4416 last_frag = skb_shinfo(skb)->nr_frags;
4418 for (i = 0; i < last_frag; i++) {
4419 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4421 prod = NEXT_TX_BD(prod);
4422 ring_prod = TX_RING_IDX(prod);
4423 txbd = &bp->tx_desc_ring[ring_prod];
4426 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4427 len, PCI_DMA_TODEVICE);
4428 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4431 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4432 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4433 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4434 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4437 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4439 prod = NEXT_TX_BD(prod);
4440 bp->tx_prod_bseq += skb->len;
4442 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4443 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4448 dev->trans_start = jiffies;
4450 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4451 netif_stop_queue(dev);
4452 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4453 netif_wake_queue(dev);
4456 return NETDEV_TX_OK;
4459 /* Called with rtnl_lock */
4461 bnx2_close(struct net_device *dev)
4463 struct bnx2 *bp = netdev_priv(dev);
4466 /* Calling flush_scheduled_work() may deadlock because
4467 * linkwatch_event() may be on the workqueue and it will try to get
4468 * the rtnl_lock which we are holding.
4470 while (bp->in_reset_task)
4473 bnx2_netif_stop(bp);
4474 del_timer_sync(&bp->timer);
4475 if (bp->flags & NO_WOL_FLAG)
4476 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4478 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4480 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4481 bnx2_reset_chip(bp, reset_code);
4482 free_irq(bp->pdev->irq, dev);
4483 if (bp->flags & USING_MSI_FLAG) {
4484 pci_disable_msi(bp->pdev);
4485 bp->flags &= ~USING_MSI_FLAG;
4490 netif_carrier_off(bp->dev);
4491 bnx2_set_power_state(bp, PCI_D3hot);
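/* Combine the hi/lo halves of the 64-bit hardware counters; 32-bit hosts use only the low 32 bits. */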
4495 #define GET_NET_STATS64(ctr) \
4496 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4497 (unsigned long) (ctr##_lo)
4499 #define GET_NET_STATS32(ctr) \
4502 #if (BITS_PER_LONG == 64)
4503 #define GET_NET_STATS GET_NET_STATS64
4505 #define GET_NET_STATS GET_NET_STATS32
4508 static struct net_device_stats *
4509 bnx2_get_stats(struct net_device *dev)
4511 struct bnx2 *bp = netdev_priv(dev);
4512 struct statistics_block *stats_blk = bp->stats_blk;
4513 struct net_device_stats *net_stats = &bp->net_stats;
4515 if (bp->stats_blk == NULL) {
4518 net_stats->rx_packets =
4519 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4520 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4521 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4523 net_stats->tx_packets =
4524 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4525 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4526 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4528 net_stats->rx_bytes =
4529 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4531 net_stats->tx_bytes =
4532 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4534 net_stats->multicast =
4535 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4537 net_stats->collisions =
4538 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4540 net_stats->rx_length_errors =
4541 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4542 stats_blk->stat_EtherStatsOverrsizePkts);
4544 net_stats->rx_over_errors =
4545 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4547 net_stats->rx_frame_errors =
4548 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4550 net_stats->rx_crc_errors =
4551 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4553 net_stats->rx_errors = net_stats->rx_length_errors +
4554 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4555 net_stats->rx_crc_errors;
4557 net_stats->tx_aborted_errors =
4558 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4559 stats_blk->stat_Dot3StatsLateCollisions);
4561 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4562 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4563 net_stats->tx_carrier_errors = 0;
4565 net_stats->tx_carrier_errors =
4567 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4570 net_stats->tx_errors =
4572 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4574 net_stats->tx_aborted_errors +
4575 net_stats->tx_carrier_errors;
4577 net_stats->rx_missed_errors =
4578 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4579 stats_blk->stat_FwRxDrop);
4584 /* All ethtool functions are called with rtnl_lock held */
4587 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4589 struct bnx2 *bp = netdev_priv(dev);
4591 cmd->supported = SUPPORTED_Autoneg;
4592 if (bp->phy_flags & PHY_SERDES_FLAG) {
4593 cmd->supported |= SUPPORTED_1000baseT_Full |
4596 cmd->port = PORT_FIBRE;
4599 cmd->supported |= SUPPORTED_10baseT_Half |
4600 SUPPORTED_10baseT_Full |
4601 SUPPORTED_100baseT_Half |
4602 SUPPORTED_100baseT_Full |
4603 SUPPORTED_1000baseT_Full |
4606 cmd->port = PORT_TP;
4609 cmd->advertising = bp->advertising;
4611 if (bp->autoneg & AUTONEG_SPEED) {
4612 cmd->autoneg = AUTONEG_ENABLE;
4615 cmd->autoneg = AUTONEG_DISABLE;
4618 if (netif_carrier_ok(dev)) {
4619 cmd->speed = bp->line_speed;
4620 cmd->duplex = bp->duplex;
4627 cmd->transceiver = XCVR_INTERNAL;
4628 cmd->phy_address = bp->phy_addr;
4634 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4636 struct bnx2 *bp = netdev_priv(dev);
4637 u8 autoneg = bp->autoneg;
4638 u8 req_duplex = bp->req_duplex;
4639 u16 req_line_speed = bp->req_line_speed;
4640 u32 advertising = bp->advertising;
4642 if (cmd->autoneg == AUTONEG_ENABLE) {
4643 autoneg |= AUTONEG_SPEED;
4645 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4647 /* allow advertising only one speed */
4648 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4649 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4650 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4651 (cmd->advertising == ADVERTISED_100baseT_Full)) {
4653 if (bp->phy_flags & PHY_SERDES_FLAG)
4656 advertising = cmd->advertising;
4659 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4660 advertising = cmd->advertising;
4662 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
4666 if (bp->phy_flags & PHY_SERDES_FLAG) {
4667 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4670 advertising = ETHTOOL_ALL_COPPER_SPEED;
4673 advertising |= ADVERTISED_Autoneg;
4676 if (bp->phy_flags & PHY_SERDES_FLAG) {
4677 if ((cmd->speed != SPEED_1000 &&
4678 cmd->speed != SPEED_2500) ||
4679 (cmd->duplex != DUPLEX_FULL))
4682 if (cmd->speed == SPEED_2500 &&
4683 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4686 else if (cmd->speed == SPEED_1000) {
4689 autoneg &= ~AUTONEG_SPEED;
4690 req_line_speed = cmd->speed;
4691 req_duplex = cmd->duplex;
4695 bp->autoneg = autoneg;
4696 bp->advertising = advertising;
4697 bp->req_line_speed = req_line_speed;
4698 bp->req_duplex = req_duplex;
4700 spin_lock_bh(&bp->phy_lock);
4704 spin_unlock_bh(&bp->phy_lock);
4710 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4712 struct bnx2 *bp = netdev_priv(dev);
4714 strcpy(info->driver, DRV_MODULE_NAME);
4715 strcpy(info->version, DRV_MODULE_VERSION);
4716 strcpy(info->bus_info, pci_name(bp->pdev));
4717 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4718 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4719 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4720 info->fw_version[1] = info->fw_version[3] = '.';
4721 info->fw_version[5] = 0;
4724 #define BNX2_REGDUMP_LEN (32 * 1024)
4727 bnx2_get_regs_len(struct net_device *dev)
4729 return BNX2_REGDUMP_LEN;
4733 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4735 u32 *p = _p, i, offset;
4737 struct bnx2 *bp = netdev_priv(dev);
4738 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4739 0x0800, 0x0880, 0x0c00, 0x0c10,
4740 0x0c30, 0x0d08, 0x1000, 0x101c,
4741 0x1040, 0x1048, 0x1080, 0x10a4,
4742 0x1400, 0x1490, 0x1498, 0x14f0,
4743 0x1500, 0x155c, 0x1580, 0x15dc,
4744 0x1600, 0x1658, 0x1680, 0x16d8,
4745 0x1800, 0x1820, 0x1840, 0x1854,
4746 0x1880, 0x1894, 0x1900, 0x1984,
4747 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4748 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4749 0x2000, 0x2030, 0x23c0, 0x2400,
4750 0x2800, 0x2820, 0x2830, 0x2850,
4751 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4752 0x3c00, 0x3c94, 0x4000, 0x4010,
4753 0x4080, 0x4090, 0x43c0, 0x4458,
4754 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4755 0x4fc0, 0x5010, 0x53c0, 0x5444,
4756 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4757 0x5fc0, 0x6000, 0x6400, 0x6428,
4758 0x6800, 0x6848, 0x684c, 0x6860,
4759 0x6888, 0x6910, 0x8000 };
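/* Dump only the readable register ranges listed above; holes between ranges stay zeroed. */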
4763 memset(p, 0, BNX2_REGDUMP_LEN);
4765 if (!netif_running(bp->dev))
4769 offset = reg_boundaries[0];
4771 while (offset < BNX2_REGDUMP_LEN) {
4772 *p++ = REG_RD(bp, offset);
4774 if (offset == reg_boundaries[i + 1]) {
4775 offset = reg_boundaries[i + 2];
4776 p = (u32 *) (orig_p + offset);
4783 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4785 struct bnx2 *bp = netdev_priv(dev);
4787 if (bp->flags & NO_WOL_FLAG) {
4792 wol->supported = WAKE_MAGIC;
4794 wol->wolopts = WAKE_MAGIC;
4798 memset(&wol->sopass, 0, sizeof(wol->sopass));
4802 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4804 struct bnx2 *bp = netdev_priv(dev);
4806 if (wol->wolopts & ~WAKE_MAGIC)
4809 if (wol->wolopts & WAKE_MAGIC) {
4810 if (bp->flags & NO_WOL_FLAG)
4822 bnx2_nway_reset(struct net_device *dev)
4824 struct bnx2 *bp = netdev_priv(dev);
4827 if (!(bp->autoneg & AUTONEG_SPEED)) {
4831 spin_lock_bh(&bp->phy_lock);
4833 /* Force a link-down event that is visible to the link partner */
4834 if (bp->phy_flags & PHY_SERDES_FLAG) {
4835 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4836 spin_unlock_bh(&bp->phy_lock);
4840 spin_lock_bh(&bp->phy_lock);
4842 bp->current_interval = SERDES_AN_TIMEOUT;
4843 bp->serdes_an_pending = 1;
4844 mod_timer(&bp->timer, jiffies + bp->current_interval);
4847 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4848 bmcr &= ~BMCR_LOOPBACK;
4849 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4851 spin_unlock_bh(&bp->phy_lock);
4857 bnx2_get_eeprom_len(struct net_device *dev)
4859 struct bnx2 *bp = netdev_priv(dev);
4861 if (bp->flash_info == NULL)
4864 return (int) bp->flash_size;
4868 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4871 struct bnx2 *bp = netdev_priv(dev);
4874 /* parameters already validated in ethtool_get_eeprom */
4876 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4882 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4885 struct bnx2 *bp = netdev_priv(dev);
4888 /* parameters already validated in ethtool_set_eeprom */
4890 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4896 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4898 struct bnx2 *bp = netdev_priv(dev);
4900 memset(coal, 0, sizeof(struct ethtool_coalesce));
4902 coal->rx_coalesce_usecs = bp->rx_ticks;
4903 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4904 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4905 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4907 coal->tx_coalesce_usecs = bp->tx_ticks;
4908 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4909 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4910 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4912 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4918 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4920 struct bnx2 *bp = netdev_priv(dev);
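/* Copy the requested coalescing parameters, clamping each one to the limits the hardware supports. */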
4922 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
4923 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
4925 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
4926 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
4928 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
4929 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
4931 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
4932 if (bp->rx_quick_cons_trip_int > 0xff)
4933 bp->rx_quick_cons_trip_int = 0xff;
4935 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
4936 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
4938 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
4939 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
4941 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
4942 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
4944 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
4945 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
4948 bp->stats_ticks = coal->stats_block_coalesce_usecs;
4949 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
4950 bp->stats_ticks &= 0xffff00;
4952 if (netif_running(bp->dev)) {
4953 bnx2_netif_stop(bp);
4955 bnx2_netif_start(bp);
4962 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4964 struct bnx2 *bp = netdev_priv(dev);
4966 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
4967 ering->rx_mini_max_pending = 0;
4968 ering->rx_jumbo_max_pending = 0;
4970 ering->rx_pending = bp->rx_ring_size;
4971 ering->rx_mini_pending = 0;
4972 ering->rx_jumbo_pending = 0;
4974 ering->tx_max_pending = MAX_TX_DESC_CNT;
4975 ering->tx_pending = bp->tx_ring_size;
4979 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
4981 struct bnx2 *bp = netdev_priv(dev);
4983 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
4984 (ering->tx_pending > MAX_TX_DESC_CNT) ||
4985 (ering->tx_pending <= MAX_SKB_FRAGS)) {
4989 if (netif_running(bp->dev)) {
4990 bnx2_netif_stop(bp);
4991 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
4996 bnx2_set_rx_ring_size(bp, ering->rx_pending);
4997 bp->tx_ring_size = ering->tx_pending;
4999 if (netif_running(bp->dev)) {
5002 rc = bnx2_alloc_mem(bp);
5006 bnx2_netif_start(bp);
5013 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5015 struct bnx2 *bp = netdev_priv(dev);
5017 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5018 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5019 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5023 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5025 struct bnx2 *bp = netdev_priv(dev);
5027 bp->req_flow_ctrl = 0;
5028 if (epause->rx_pause)
5029 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5030 if (epause->tx_pause)
5031 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5033 if (epause->autoneg) {
5034 bp->autoneg |= AUTONEG_FLOW_CTRL;
5037 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5040 spin_lock_bh(&bp->phy_lock);
5044 spin_unlock_bh(&bp->phy_lock);
5050 bnx2_get_rx_csum(struct net_device *dev)
5052 struct bnx2 *bp = netdev_priv(dev);
5058 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5060 struct bnx2 *bp = netdev_priv(dev);
5067 bnx2_set_tso(struct net_device *dev, u32 data)
5070 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5072 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5076 #define BNX2_NUM_STATS 46
5079 char string[ETH_GSTRING_LEN];
5080 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5082 { "rx_error_bytes" },
5084 { "tx_error_bytes" },
5085 { "rx_ucast_packets" },
5086 { "rx_mcast_packets" },
5087 { "rx_bcast_packets" },
5088 { "tx_ucast_packets" },
5089 { "tx_mcast_packets" },
5090 { "tx_bcast_packets" },
5091 { "tx_mac_errors" },
5092 { "tx_carrier_errors" },
5093 { "rx_crc_errors" },
5094 { "rx_align_errors" },
5095 { "tx_single_collisions" },
5096 { "tx_multi_collisions" },
5098 { "tx_excess_collisions" },
5099 { "tx_late_collisions" },
5100 { "tx_total_collisions" },
5103 { "rx_undersize_packets" },
5104 { "rx_oversize_packets" },
5105 { "rx_64_byte_packets" },
5106 { "rx_65_to_127_byte_packets" },
5107 { "rx_128_to_255_byte_packets" },
5108 { "rx_256_to_511_byte_packets" },
5109 { "rx_512_to_1023_byte_packets" },
5110 { "rx_1024_to_1522_byte_packets" },
5111 { "rx_1523_to_9022_byte_packets" },
5112 { "tx_64_byte_packets" },
5113 { "tx_65_to_127_byte_packets" },
5114 { "tx_128_to_255_byte_packets" },
5115 { "tx_256_to_511_byte_packets" },
5116 { "tx_512_to_1023_byte_packets" },
5117 { "tx_1024_to_1522_byte_packets" },
5118 { "tx_1523_to_9022_byte_packets" },
5119 { "rx_xon_frames" },
5120 { "rx_xoff_frames" },
5121 { "tx_xon_frames" },
5122 { "tx_xoff_frames" },
5123 { "rx_mac_ctrl_frames" },
5124 { "rx_filtered_packets" },
5126 { "rx_fw_discards" },
5129 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
	STATS_OFFSET32(stat_IfHCInOctets_hi),
	STATS_OFFSET32(stat_IfHCInBadOctets_hi),
	STATS_OFFSET32(stat_IfHCOutOctets_hi),
	STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
	STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
	STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
	STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
	STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
	STATS_OFFSET32(stat_Dot3StatsFCSErrors),
	STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
	STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
	STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
	STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
	STATS_OFFSET32(stat_Dot3StatsLateCollisions),
	STATS_OFFSET32(stat_EtherStatsCollisions),
	STATS_OFFSET32(stat_EtherStatsFragments),
	STATS_OFFSET32(stat_EtherStatsJabbers),
	STATS_OFFSET32(stat_EtherStatsUndersizePkts),
	STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
	STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
	STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
	STATS_OFFSET32(stat_XonPauseFramesReceived),
	STATS_OFFSET32(stat_XoffPauseFramesReceived),
	STATS_OFFSET32(stat_OutXonSent),
	STATS_OFFSET32(stat_OutXoffSent),
	STATS_OFFSET32(stat_MacControlFramesReceived),
	STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
	STATS_OFFSET32(stat_IfInMBUFDiscards),
	STATS_OFFSET32(stat_FwRxDrop),
};
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
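/* The self test is typically driven from userspace with
 * "ethtool -t ethX offline"; results come back as one u64 per entry in
 * bnx2_tests_str_arr, with a nonzero value indicating failure.
 */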
static void
bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, bnx2_stats_str_arr, sizeof(bnx2_stats_str_arr));
		break;
	case ETH_SS_TEST:
		memcpy(buf, bnx2_tests_str_arr, sizeof(bnx2_tests_str_arr));
		break;
	}
}
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
				bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
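/* The chip exports 64-bit counters as two consecutive 32-bit words with the
 * high word first, which is how buf[i] is assembled above; "ethtool -S ethX"
 * is the usual way to read these counters from userspace.
 */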
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
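/* "ethtool -p ethX N" lands here with data = N seconds; the loop above
 * toggles the LED every half second, so data * 2 iterations cover the
 * requested duration before the original LED mode is restored.
 */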
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings = bnx2_get_settings,
	.set_settings = bnx2_set_settings,
	.get_drvinfo = bnx2_get_drvinfo,
	.get_regs_len = bnx2_get_regs_len,
	.get_regs = bnx2_get_regs,
	.get_wol = bnx2_get_wol,
	.set_wol = bnx2_set_wol,
	.nway_reset = bnx2_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = bnx2_get_eeprom_len,
	.get_eeprom = bnx2_get_eeprom,
	.set_eeprom = bnx2_set_eeprom,
	.get_coalesce = bnx2_get_coalesce,
	.set_coalesce = bnx2_set_coalesce,
	.get_ringparam = bnx2_get_ringparam,
	.set_ringparam = bnx2_set_ringparam,
	.get_pauseparam = bnx2_get_pauseparam,
	.set_pauseparam = bnx2_set_pauseparam,
	.get_rx_csum = bnx2_get_rx_csum,
	.set_rx_csum = bnx2_set_rx_csum,
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = bnx2_set_tso,
	.self_test_count = bnx2_self_test_count,
	.self_test = bnx2_self_test,
	.get_strings = bnx2_get_strings,
	.phys_id = bnx2_phys_id,
	.get_stats_count = bnx2_get_stats_count,
	.get_ethtool_stats = bnx2_get_ethtool_stats,
	.get_perm_addr = ethtool_op_get_perm_addr,
};
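/* These hooks back the standard ethtool ioctls; for example "ethtool ethX"
 * uses get_settings, "ethtool -i" get_drvinfo, "ethtool -S" the statistics
 * callbacks, "ethtool -t" the self test, and "ethtool -p" phys_id.
 */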
/* Called with rtnl_lock */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;
		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);
		data->val_out = mii_regval;
		return err;
	}
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);
		return err;
	default:
		break;
	}
	return -EOPNOTSUPP;
}
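/* MII register pass-through for userspace tools such as mii-tool:
 * SIOCGMIIPHY/SIOCGMIIREG read a PHY register through bnx2_read_phy(),
 * while SIOCSMIIREG writes one and requires CAP_NET_ADMIN.
 */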
/* Called with rtnl_lock */
static int
bnx2_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2 *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int
bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;
	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
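/* netpoll clients such as netconsole call this when normal interrupt
 * delivery cannot be relied on: the IRQ line is masked and the regular
 * interrupt handler is invoked directly to drain the rings.
 */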
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc;
	u32 reg;

	SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting.\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
	if (bp->pcix_cap == 0) {
		dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting.\n");
			rc = -EIO;
			goto err_out_release;
		}
	}
	else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
		rc = -EIO;
		goto err_out_release;
	}
	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);

	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(17);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
		BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
	/* Get bus information. */
	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= PCIX_FLAG;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= PCI_32BIT_FLAG;
	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & PCIX_FLAG)) {
		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting.\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG)
		bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
	else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting.\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}
	bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	bp->rx_offset = sizeof(struct l2_fhdr) + 2;

	/* Default host coalescing parameters. */
	bp->tx_quick_cons_trip_int = 20;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 80;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 6;
	bp->rx_quick_cons_trip = 6;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = HZ;

	bp->phy_addr = 1;
	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
		bp->phy_flags |= PHY_SERDES_FLAG;
		bp->flags |= NO_WOL_FLAG;
		if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			bp->phy_addr = 2;
			reg = REG_RD_IND(bp, bp->shmem_base +
				BNX2_SHARED_HW_CFG_CONFIG);
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
		}
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1))
		bp->flags |= NO_WOL_FLAG;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}
	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it
	 * using pci_msi_quirk.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
			PCI_DEVICE_ID_AMD_8132_BRIDGE,
			amd_8132)) != NULL) {
			u8 rev;

			pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
			if (rev >= 0x10 && rev <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}
	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	}
	else {
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
	}

	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(bp->timer_interval);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc, i;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}
	dev->open = bnx2_open;
	dev->hard_start_xmit = bnx2_start_xmit;
	dev->stop = bnx2_close;
	dev->get_stats = bnx2_get_stats;
	dev->set_multicast_list = bnx2_set_rx_mode;
	dev->do_ioctl = bnx2_ioctl;
	dev->set_mac_address = bnx2_change_mac_addr;
	dev->change_mtu = bnx2_change_mtu;
	dev->tx_timeout = bnx2_tx_timeout;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->vlan_rx_register = bnx2_vlan_rx_register;
	dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
	dev->poll = bnx2_poll;
	dev->ethtool_ops = &bnx2_ethtool_ops;
	dev->weight = 64;

	bp = netdev_priv(dev);

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2;
#endif
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);
	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
		"IRQ %d, ", dev->name, bp->name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		((bp->flags & PCIX_FLAG) ? "-X" : ""),
		((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
		bp->bus_speed_mhz, dev->base_addr, bp->pdev->irq);

	printk("node addr ");
	for (i = 0; i < 6; i++)
		printk("%2.2x", dev->dev_addr[i]);
	printk("\n");
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	netif_carrier_off(bp->dev);

	return 0;
}
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
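/* The PCI power management hooks below quiesce the device before suspend:
 * the firmware reset code is chosen according to whether WOL is supported
 * and enabled, and resume brings the chip back to D0 and restarts the
 * interface if it was running.
 */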
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
static struct pci_driver bnx2_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2_pci_tbl,
	.probe = bnx2_init_one,
	.remove = __devexit_p(bnx2_remove_one),
	.suspend = bnx2_suspend,
	.resume = bnx2_resume,
};
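/* The PCI core matches devices against bnx2_pci_tbl and then calls
 * bnx2_init_one() for each adapter found; remove, suspend and resume are
 * invoked on hot-unplug, driver unload and system power transitions.
 */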
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
module_init(bnx2_init);
module_exit(bnx2_cleanup);