1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
45 #include <net/checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/prefetch.h>
51 #include <linux/cache.h>
52 #include <linux/zlib.h>
57 #define DRV_MODULE_NAME "bnx2"
58 #define PFX DRV_MODULE_NAME ": "
59 #define DRV_MODULE_VERSION "1.4.45"
60 #define DRV_MODULE_RELDATE "September 29, 2006"
62 #define RUN_AT(x) (jiffies + (x))
64 /* Time in jiffies before concluding the transmitter is hung. */
65 #define TX_TIMEOUT (5*HZ)
67 static const char version[] __devinitdata =
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
71 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
72 MODULE_LICENSE("GPL");
73 MODULE_VERSION(DRV_MODULE_VERSION);
75 static int disable_msi = 0;
77 module_param(disable_msi, int, 0);
78 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
103 static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
121 static struct flash_spec flash_table[] =
124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 	/* Atmel Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205 "Buffered flash (256kB)"},
208 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
/* Return the number of free tx descriptors in the tx ring.
 * NOTE(review): this listing is elided (line numbers baked in, gaps
 * present); the producer/consumer difference is masked to the ring size
 * when it wraps. Verify against the full source.
 */
210 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
/* diff = used descriptors between producer and consumer indices */
215 	diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
216 	if (diff > MAX_TX_DESC_CNT)
/* wrapped: fold back into ring range (the -1 skips the link BD slot
 * at the page boundary — presumably; confirm in bnx2.h) */
217 		diff = (diff & MAX_TX_DESC_CNT) - 1;
218 	return (bp->tx_ring_size - diff);
/* Indirect register read: select the target offset through the PCICFG
 * register window, then read the windowed data register. */
222 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
224 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 	return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
/* Indirect register write: same window mechanism as the read above.
 * NOTE(review): no locking visible here — presumably callers serialize
 * access to the shared window; confirm in the full source. */
229 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
231 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
/* Write one 32-bit word into the chip's context memory for context
 * cid_addr at the given offset.  On 5709 the write goes through the
 * CTX_CTX_DATA/CTX_CTX_CTRL request interface and is polled for
 * completion (up to 5 iterations); older chips use the direct
 * CTX_DATA_ADR/CTX_DATA pair.
 * NOTE(review): elided listing — the cid_addr merge into `offset` and
 * the poll delay are among the missing lines; verify in full source.
 */
236 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
239 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
242 		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
243 		REG_WR(bp, BNX2_CTX_CTX_CTRL,
244 			offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
/* poll until the hardware clears WRITE_REQ (bounded retries) */
245 		for (i = 0; i < 5; i++) {
247 			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
248 			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
/* non-5709 path: direct address/data context write */
253 		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
254 		REG_WR(bp, BNX2_CTX_DATA, val);
/* Read PHY register `reg` over the EMAC MDIO interface into *val.
 * If hardware auto-polling is active it is temporarily disabled around
 * the manual MDIO transaction and re-enabled afterwards.  The COMM
 * register is polled (up to 50 iterations) for START_BUSY to clear.
 * NOTE(review): elided listing — the delay inside the poll loop and the
 * timeout/error return paths are among the missing lines.
 */
259 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
264 	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
/* suspend auto-poll so the manual transaction owns the MDIO bus */
265 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
266 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
268 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
/* read back to flush the posted write */
269 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* build the MDIO read command: phy addr, reg, READ op, start */
274 	val1 = (bp->phy_addr << 21) | (reg << 16) |
275 		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
276 		BNX2_EMAC_MDIO_COMM_START_BUSY;
277 	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
279 	for (i = 0; i < 50; i++) {
282 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
283 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
/* transaction done: re-read and mask off the 16-bit data field */
286 			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
287 			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
/* still busy after the poll loop => timed out (error path elided) */
293 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
302 	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
/* restore hardware auto-polling */
303 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
304 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
306 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
307 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* Write `val` to PHY register `reg` over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy: auto-polling is suspended during the manual
 * transaction, the COMM register is polled for START_BUSY to clear,
 * then auto-polling is restored.
 * NOTE(review): elided listing — poll delays and the timeout return
 * are among the missing lines.
 */
316 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
321 	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
/* suspend hardware auto-poll while we drive the bus manually */
322 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
323 		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
325 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
326 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* MDIO write command: phy addr, reg, data, WRITE op, start */
331 	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
332 		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
333 		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
334 	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
336 	for (i = 0; i < 50; i++) {
339 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
340 		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
/* busy bit never cleared => timeout (error handling elided) */
346 	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
351 	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
/* restore hardware auto-polling */
352 		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353 		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
355 		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
356 		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
/* Mask device interrupts via the PCICFG int-ack command register; the
 * trailing read flushes the posted write. */
365 bnx2_disable_int(struct bnx2 *bp)
367 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
368 	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
369 	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
/* Unmask interrupts: first ack the last seen status index with MASK_INT
 * still set, then again with MASK_INT cleared, and finally force a
 * coalescing pass (COAL_NOW) so any pending events fire promptly. */
373 bnx2_enable_int(struct bnx2 *bp)
375 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
376 	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
377 	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
379 	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
380 	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
382 	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR/poll path sees the disable. */
386 bnx2_disable_int_sync(struct bnx2 *bp)
388 	atomic_inc(&bp->intr_sem);
389 	bnx2_disable_int(bp);
390 	synchronize_irq(bp->pdev->irq);
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the tx queue.  trans_start is refreshed so the
 * watchdog does not misread the stopped queue as a tx timeout. */
394 bnx2_netif_stop(struct bnx2 *bp)
396 	bnx2_disable_int_sync(bp);
397 	if (netif_running(bp->dev)) {
398 		netif_poll_disable(bp->dev);
399 		netif_tx_disable(bp->dev);
400 		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
/* Re-enable the interface; only acts when this call balances the last
 * outstanding bnx2_netif_stop (intr_sem drops to zero).
 * NOTE(review): the matching bnx2_enable_int call appears to be in an
 * elided line — confirm against the full source. */
405 bnx2_netif_start(struct bnx2 *bp)
407 	if (atomic_dec_and_test(&bp->intr_sem)) {
408 		if (netif_running(bp->dev)) {
409 			netif_wake_queue(bp->dev);
410 			netif_poll_enable(bp->dev);
/* Release all DMA-coherent and kernel memory owned by the device:
 * 5709 context pages, the combined status/statistics block, the tx
 * descriptor ring + tx shadow ring, and all rx descriptor rings + the
 * vmalloc'ed rx shadow ring.  Pointers are NULLed after freeing so the
 * function is safe to call on a partially allocated device. */
417 bnx2_free_mem(struct bnx2 *bp)
421 	for (i = 0; i < bp->ctx_pages; i++) {
422 		if (bp->ctx_blk[i]) {
423 			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
425 					    bp->ctx_blk_mapping[i]);
426 			bp->ctx_blk[i] = NULL;
/* status and stats blocks share one allocation (see bnx2_alloc_mem) */
429 	if (bp->status_blk) {
430 		pci_free_consistent(bp->pdev, bp->status_stats_size,
431 				    bp->status_blk, bp->status_blk_mapping);
432 		bp->status_blk = NULL;
433 		bp->stats_blk = NULL;
435 	if (bp->tx_desc_ring) {
436 		pci_free_consistent(bp->pdev,
437 				    sizeof(struct tx_bd) * TX_DESC_CNT,
438 				    bp->tx_desc_ring, bp->tx_desc_mapping);
439 		bp->tx_desc_ring = NULL;
/* kfree(NULL) is a no-op, so no guard is needed */
441 	kfree(bp->tx_buf_ring);
442 	bp->tx_buf_ring = NULL;
443 	for (i = 0; i < bp->rx_max_ring; i++) {
444 		if (bp->rx_desc_ring[i])
445 			pci_free_consistent(bp->pdev,
446 					    sizeof(struct rx_bd) * RX_DESC_CNT,
448 					    bp->rx_desc_mapping[i]);
449 		bp->rx_desc_ring[i] = NULL;
/* rx shadow ring was vmalloc'ed (can exceed kmalloc limits) */
451 	vfree(bp->rx_buf_ring);
452 	bp->rx_buf_ring = NULL;
/* Allocate all per-device memory: tx shadow + descriptor rings, rx
 * shadow + descriptor rings, the combined status/statistics DMA block,
 * and (5709 only) context pages.  On any failure the elided error path
 * presumably unwinds via bnx2_free_mem — confirm in the full source. */
456 bnx2_alloc_mem(struct bnx2 *bp)
458 	int i, status_blk_size;
460 	bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
462 	if (bp->tx_buf_ring == NULL)
465 	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
466 						sizeof(struct tx_bd) *
468 						&bp->tx_desc_mapping);
469 	if (bp->tx_desc_ring == NULL)
/* rx shadow ring can be large (RX_DESC_CNT * max rings) => vmalloc */
472 	bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
474 	if (bp->rx_buf_ring == NULL)
477 	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
480 	for (i = 0; i < bp->rx_max_ring; i++) {
481 		bp->rx_desc_ring[i] =
482 			pci_alloc_consistent(bp->pdev,
483 					     sizeof(struct rx_bd) * RX_DESC_CNT,
484 					     &bp->rx_desc_mapping[i]);
485 		if (bp->rx_desc_ring[i] == NULL)
490 	/* Combine status and statistics blocks into one allocation. */
491 	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
492 	bp->status_stats_size = status_blk_size +
493 				sizeof(struct statistics_block);
495 	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
496 					      &bp->status_blk_mapping);
497 	if (bp->status_blk == NULL)
500 	memset(bp->status_blk, 0, bp->status_stats_size);
/* stats block lives directly after the cache-aligned status block */
502 	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
505 	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
507 	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
/* 8KB of context memory split into BCM_PAGE_SIZE pages */
508 		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
509 		if (bp->ctx_pages == 0)
511 		for (i = 0; i < bp->ctx_pages; i++) {
512 			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
514 						&bp->ctx_blk_mapping[i]);
515 			if (bp->ctx_blk[i] == NULL)
/* Publish the current link state (speed/duplex, autoneg status,
 * parallel-detect) to the bootcode via the shared-memory LINK_STATUS
 * word so firmware/management agents see what the driver sees.
 * NOTE(review): the switch's `case SPEED_*:` labels and `break`s are in
 * elided lines; the structure below is the visible remainder. */
527 bnx2_report_fw_link(struct bnx2 *bp)
529 	u32 fw_link_status = 0;
534 		switch (bp->line_speed) {
536 			if (bp->duplex == DUPLEX_HALF)
537 				fw_link_status = BNX2_LINK_STATUS_10HALF;
539 				fw_link_status = BNX2_LINK_STATUS_10FULL;
542 			if (bp->duplex == DUPLEX_HALF)
543 				fw_link_status = BNX2_LINK_STATUS_100HALF;
545 				fw_link_status = BNX2_LINK_STATUS_100FULL;
548 			if (bp->duplex == DUPLEX_HALF)
549 				fw_link_status = BNX2_LINK_STATUS_1000HALF;
551 				fw_link_status = BNX2_LINK_STATUS_1000FULL;
554 			if (bp->duplex == DUPLEX_HALF)
555 				fw_link_status = BNX2_LINK_STATUS_2500HALF;
557 				fw_link_status = BNX2_LINK_STATUS_2500FULL;
561 		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
564 			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
/* BMSR is read twice: the first read returns latched bits; the second
 * reflects current status */
566 			bnx2_read_phy(bp, MII_BMSR, &bmsr);
567 			bnx2_read_phy(bp, MII_BMSR, &bmsr);
569 			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
570 			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
571 				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
573 				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
/* link-down path (condition elided) */
577 		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
579 	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
/* Log the link state to the kernel log and toggle the carrier flag,
 * then forward the state to firmware via bnx2_report_fw_link().
 * The multi-part printk builds one line: speed, duplex, flow control. */
583 bnx2_report_link(struct bnx2 *bp)
586 		netif_carrier_on(bp->dev);
587 		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
589 		printk("%d Mbps ", bp->line_speed);
591 		if (bp->duplex == DUPLEX_FULL)
592 			printk("full duplex");
594 			printk("half duplex");
597 			if (bp->flow_ctrl & FLOW_CTRL_RX) {
598 				printk(", receive ");
599 				if (bp->flow_ctrl & FLOW_CTRL_TX)
600 					printk("& transmit ");
603 				printk(", transmit ");
605 			printk("flow control ON");
/* link-down branch (condition elided) */
610 		netif_carrier_off(bp->dev);
611 		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
614 	bnx2_report_fw_link(bp);
/* Resolve the effective flow-control mode (bp->flow_ctrl) after link up.
 * If speed or flow-control autoneg is off, the requested mode is used
 * directly.  On 5708 SerDes the resolved pause state is read from the
 * hardware 1000X_STAT1 register.  Otherwise the advertisement registers
 * of both ends are combined per IEEE 802.3 Table 28B-3 (SerDes 1000X
 * pause bits are first translated to the copper ADVERTISE_PAUSE_* form).
 */
618 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
620 	u32 local_adv, remote_adv;
623 	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
624 		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
626 		if (bp->duplex == DUPLEX_FULL) {
627 			bp->flow_ctrl = bp->req_flow_ctrl;
/* pause is only meaningful at full duplex */
632 	if (bp->duplex != DUPLEX_FULL) {
636 	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
637 	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
/* 5708 SerDes: hardware reports the resolved pause state directly */
640 		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
641 		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
642 			bp->flow_ctrl |= FLOW_CTRL_TX;
643 		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
644 			bp->flow_ctrl |= FLOW_CTRL_RX;
648 	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
649 	bnx2_read_phy(bp, MII_LPA, &remote_adv);
651 	if (bp->phy_flags & PHY_SERDES_FLAG) {
652 		u32 new_local_adv = 0;
653 		u32 new_remote_adv = 0;
/* map 1000X pause bits onto the copper pause-bit layout so the
 * resolution logic below can be shared */
655 		if (local_adv & ADVERTISE_1000XPAUSE)
656 			new_local_adv |= ADVERTISE_PAUSE_CAP;
657 		if (local_adv & ADVERTISE_1000XPSE_ASYM)
658 			new_local_adv |= ADVERTISE_PAUSE_ASYM;
659 		if (remote_adv & ADVERTISE_1000XPAUSE)
660 			new_remote_adv |= ADVERTISE_PAUSE_CAP;
661 		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
662 			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
664 		local_adv = new_local_adv;
665 		remote_adv = new_remote_adv;
668 	/* See Table 28B-3 of 802.3ab-1999 spec. */
669 	if (local_adv & ADVERTISE_PAUSE_CAP) {
670 		if(local_adv & ADVERTISE_PAUSE_ASYM) {
671 	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
672 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
674 			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
675 				bp->flow_ctrl = FLOW_CTRL_RX;
679 			if (remote_adv & ADVERTISE_PAUSE_CAP) {
680 				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
684 	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
685 		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
686 			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
688 			bp->flow_ctrl = FLOW_CTRL_TX;
/* Derive line_speed/duplex for a 5708 SerDes link from the hardware
 * 1000X_STAT1 register (speeds 10M..2.5G, FD bit for duplex).
 * NOTE(review): the `break`s between the speed cases are elided. */
694 bnx2_5708s_linkup(struct bnx2 *bp)
699 	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
700 	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
701 		case BCM5708S_1000X_STAT1_SPEED_10:
702 			bp->line_speed = SPEED_10;
704 		case BCM5708S_1000X_STAT1_SPEED_100:
705 			bp->line_speed = SPEED_100;
707 		case BCM5708S_1000X_STAT1_SPEED_1G:
708 			bp->line_speed = SPEED_1000;
710 		case BCM5708S_1000X_STAT1_SPEED_2G5:
711 			bp->line_speed = SPEED_2500;
714 	if (val & BCM5708S_1000X_STAT1_FD)
715 		bp->duplex = DUPLEX_FULL;
717 		bp->duplex = DUPLEX_HALF;
/* Derive duplex for a 5706 SerDes link (speed is fixed at 1000).
 * Forced mode uses BMCR's duplex bit; autoneg mode intersects the
 * local and partner 1000X advertisements. */
723 bnx2_5706s_linkup(struct bnx2 *bp)
725 	u32 bmcr, local_adv, remote_adv, common;
728 	bp->line_speed = SPEED_1000;
730 	bnx2_read_phy(bp, MII_BMCR, &bmcr);
731 	if (bmcr & BMCR_FULLDPLX) {
732 		bp->duplex = DUPLEX_FULL;
735 		bp->duplex = DUPLEX_HALF;
/* forced (non-autoneg) mode: BMCR already decided the duplex */
738 	if (!(bmcr & BMCR_ANENABLE)) {
742 	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
743 	bnx2_read_phy(bp, MII_LPA, &remote_adv);
/* highest common denominator of both advertisements wins */
745 	common = local_adv & remote_adv;
746 	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
748 		if (common & ADVERTISE_1000XFULL) {
749 			bp->duplex = DUPLEX_FULL;
752 			bp->duplex = DUPLEX_HALF;
/* Derive line_speed/duplex for a copper link.  With autoneg the result
 * is the best mode common to both 1000BASE-T (CTRL1000/STAT1000) and
 * 10/100 (ADVERTISE/LPA) advertisements; without autoneg BMCR's forced
 * speed/duplex bits are used. */
760 bnx2_copper_linkup(struct bnx2 *bp)
764 	bnx2_read_phy(bp, MII_BMCR, &bmcr);
765 	if (bmcr & BMCR_ANENABLE) {
766 		u32 local_adv, remote_adv, common;
768 		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
769 		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
/* STAT1000 capability bits sit 2 positions above their CTRL1000
 * counterparts, hence the >> 2 before intersecting */
771 		common = local_adv & (remote_adv >> 2);
772 		if (common & ADVERTISE_1000FULL) {
773 			bp->line_speed = SPEED_1000;
774 			bp->duplex = DUPLEX_FULL;
776 		else if (common & ADVERTISE_1000HALF) {
777 			bp->line_speed = SPEED_1000;
778 			bp->duplex = DUPLEX_HALF;
/* no common gigabit mode: fall back to the 10/100 advertisements */
781 			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
782 			bnx2_read_phy(bp, MII_LPA, &remote_adv);
784 			common = local_adv & remote_adv;
785 			if (common & ADVERTISE_100FULL) {
786 				bp->line_speed = SPEED_100;
787 				bp->duplex = DUPLEX_FULL;
789 			else if (common & ADVERTISE_100HALF) {
790 				bp->line_speed = SPEED_100;
791 				bp->duplex = DUPLEX_HALF;
793 			else if (common & ADVERTISE_10FULL) {
794 				bp->line_speed = SPEED_10;
795 				bp->duplex = DUPLEX_FULL;
797 			else if (common & ADVERTISE_10HALF) {
798 				bp->line_speed = SPEED_10;
799 				bp->duplex = DUPLEX_HALF;
/* autoneg disabled: read the forced settings out of BMCR */
808 		if (bmcr & BMCR_SPEED100) {
809 			bp->line_speed = SPEED_100;
812 			bp->line_speed = SPEED_10;
814 		if (bmcr & BMCR_FULLDPLX) {
815 			bp->duplex = DUPLEX_FULL;
818 			bp->duplex = DUPLEX_HALF;
/* Program the EMAC to match the resolved link parameters: IFG/slot
 * timings, port mode for the negotiated speed, duplex, and rx/tx PAUSE
 * enables; finally ack the link-change attention. */
826 bnx2_set_mac_link(struct bnx2 *bp)
/* default inter-frame timings; 1000HD needs a larger slot value */
830 	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
831 	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
832 		(bp->duplex == DUPLEX_HALF)) {
833 		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
836 	/* Configure the EMAC mode register. */
837 	val = REG_RD(bp, BNX2_EMAC_MODE);
839 	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
840 		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
841 		BNX2_EMAC_MODE_25G_MODE);
/* NOTE(review): the `case SPEED_*:` labels of this switch are elided */
844 		switch (bp->line_speed) {
846 				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
847 					val |= BNX2_EMAC_MODE_PORT_MII_10M;
852 				val |= BNX2_EMAC_MODE_PORT_MII;
855 				val |= BNX2_EMAC_MODE_25G_MODE;
858 				val |= BNX2_EMAC_MODE_PORT_GMII;
863 		val |= BNX2_EMAC_MODE_PORT_GMII;
866 	/* Set the MAC to operate in the appropriate duplex mode. */
867 	if (bp->duplex == DUPLEX_HALF)
868 		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
869 	REG_WR(bp, BNX2_EMAC_MODE, val);
871 	/* Enable/disable rx PAUSE. */
872 	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
874 	if (bp->flow_ctrl & FLOW_CTRL_RX)
875 		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
876 	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
878 	/* Enable/disable tx PAUSE. */
879 	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
880 	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
882 	if (bp->flow_ctrl & FLOW_CTRL_TX)
883 		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
884 	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
886 	/* Acknowledge the interrupt. */
887 	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
/* Re-evaluate link state after a PHY event.  Reads BMSR (twice, to
 * clear latched bits), fixes up the link bit for 5706 SerDes from the
 * EMAC status, resolves speed/duplex/flow-control on link-up, restarts
 * SerDes autoneg after a forced-2.5G link drop, reports changes, and
 * reprograms the MAC.  Skipped entirely while in loopback mode. */
893 bnx2_set_link(struct bnx2 *bp)
898 	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
903 	link_up = bp->link_up;
905 	bnx2_read_phy(bp, MII_BMSR, &bmsr);
906 	bnx2_read_phy(bp, MII_BMSR, &bmsr);
908 	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
909 	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
/* 5706 SerDes: BMSR link bit is unreliable; trust EMAC_STATUS_LINK */
912 			val = REG_RD(bp, BNX2_EMAC_STATUS);
913 			if (val & BNX2_EMAC_STATUS_LINK)
914 				bmsr |= BMSR_LSTATUS;
916 				bmsr &= ~BMSR_LSTATUS;
919 	if (bmsr & BMSR_LSTATUS) {
922 		if (bp->phy_flags & PHY_SERDES_FLAG) {
923 			if (CHIP_NUM(bp) == CHIP_NUM_5706)
924 				bnx2_5706s_linkup(bp);
925 			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
926 				bnx2_5708s_linkup(bp);
929 			bnx2_copper_linkup(bp);
931 		bnx2_resolve_flow_ctrl(bp);
/* link down: if a forced-2.5G SerDes autoneg config is stale, clear
 * the force bit and restart autonegotiation */
934 		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
935 			(bp->autoneg & AUTONEG_SPEED)) {
939 			bnx2_read_phy(bp, MII_BMCR, &bmcr);
940 			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
941 			if (!(bmcr & BMCR_ANENABLE)) {
942 				bnx2_write_phy(bp, MII_BMCR, bmcr |
946 			bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
950 	if (bp->link_up != link_up) {
951 		bnx2_report_link(bp);
954 	bnx2_set_mac_link(bp);
/* Soft-reset the PHY via BMCR_RESET and poll (up to 100 iterations)
 * for the self-clearing reset bit; hitting the limit is the timeout
 * error path (elided here). */
960 bnx2_reset_phy(struct bnx2 *bp)
965 	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
967 #define PHY_RESET_MAX_WAIT 100
968 	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
971 		bnx2_read_phy(bp, MII_BMCR, &reg);
972 		if (!(reg & BMCR_RESET)) {
977 	if (i == PHY_RESET_MAX_WAIT) {
/* Translate the requested flow-control mode (bp->req_flow_ctrl) into
 * the pause advertisement bits for the active PHY type: 1000X pause
 * bits for SerDes, ADVERTISE_PAUSE_* for copper.  TX-only maps to
 * asymmetric pause; RX-only advertises both symmetric and asymmetric
 * per the 802.3 pause-resolution rules. */
984 bnx2_phy_get_pause_adv(struct bnx2 *bp)
988 	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
989 		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
991 		if (bp->phy_flags & PHY_SERDES_FLAG) {
992 			adv = ADVERTISE_1000XPAUSE;
995 			adv = ADVERTISE_PAUSE_CAP;
998 	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
999 		if (bp->phy_flags & PHY_SERDES_FLAG) {
1000 			adv = ADVERTISE_1000XPSE_ASYM;
1003 			adv = ADVERTISE_PAUSE_ASYM;
1006 	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1007 		if (bp->phy_flags & PHY_SERDES_FLAG) {
1008 			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1011 			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
/* Configure the SerDes PHY according to the requested settings.
 * Forced-speed path: program BMCR (1000 or forced 2.5G via the 5708
 * force bit + UP1 2.5G enable) and, when the config changed, briefly
 * force link down so the partner notices.  Autoneg path: build the
 * 1000X advertisement (plus pause bits), restart autoneg if it
 * changed, and arm a short timer to detect non-autonegotiating
 * partners (parallel detect). */
1018 bnx2_setup_serdes_phy(struct bnx2 *bp)
1023 	if (!(bp->autoneg & AUTONEG_SPEED)) {
1025 		int force_link_down = 0;
1027 		bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1028 		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1030 		bnx2_read_phy(bp, MII_BMCR, &bmcr);
1031 		new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
1032 		new_bmcr |= BMCR_SPEED1000;
1033 		if (bp->req_line_speed == SPEED_2500) {
1034 			new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1035 			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1036 			if (!(up1 & BCM5708S_UP1_2G5)) {
/* enabling 2.5G capability requires a link bounce to take effect */
1037 				up1 |= BCM5708S_UP1_2G5;
1038 				bnx2_write_phy(bp, BCM5708S_UP1, up1);
1039 				force_link_down = 1;
1041 		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1042 			bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1043 			if (up1 & BCM5708S_UP1_2G5) {
1044 				up1 &= ~BCM5708S_UP1_2G5;
1045 				bnx2_write_phy(bp, BCM5708S_UP1, up1);
1046 				force_link_down = 1;
1050 		if (bp->req_duplex == DUPLEX_FULL) {
1051 			adv |= ADVERTISE_1000XFULL;
1052 			new_bmcr |= BMCR_FULLDPLX;
1055 			adv |= ADVERTISE_1000XHALF;
1056 			new_bmcr &= ~BMCR_FULLDPLX;
1058 		if ((new_bmcr != bmcr) || (force_link_down)) {
1059 			/* Force a link down visible on the other side */
1061 				bnx2_write_phy(bp, MII_ADVERTISE, adv &
1062 					       ~(ADVERTISE_1000XFULL |
1063 						 ADVERTISE_1000XHALF));
1064 				bnx2_write_phy(bp, MII_BMCR, bmcr |
1065 					BMCR_ANRESTART | BMCR_ANENABLE);
1068 				netif_carrier_off(bp->dev);
1069 				bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1070 				bnx2_report_link(bp);
1072 			bnx2_write_phy(bp, MII_ADVERTISE, adv);
1073 			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
/* autoneg path: advertise 2.5G capability when the PHY supports it */
1078 	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1079 		bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1080 		up1 |= BCM5708S_UP1_2G5;
1081 		bnx2_write_phy(bp, BCM5708S_UP1, up1);
1084 	if (bp->advertising & ADVERTISED_1000baseT_Full)
1085 		new_adv |= ADVERTISE_1000XFULL;
1087 	new_adv |= bnx2_phy_get_pause_adv(bp);
1089 	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1090 	bnx2_read_phy(bp, MII_BMCR, &bmcr);
1092 	bp->serdes_an_pending = 0;
1093 	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1094 		/* Force a link down visible on the other side */
1096 			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
/* drop phy_lock around the (elided) sleep while link is forced down */
1097 			spin_unlock_bh(&bp->phy_lock);
1099 			spin_lock_bh(&bp->phy_lock);
1102 		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1103 		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1105 		/* Speed up link-up time when the link partner
1106 		 * does not autonegotiate which is very common
1107 		 * in blade servers. Some blade servers use
1108 		 * IPMI for keyboard input and it's important
1109 		 * to minimize link disruptions. Autoneg. involves
1110 		 * exchanging base pages plus 3 next pages and
1111 		 * normally completes in about 120 msec.
1113 		bp->current_interval = SERDES_AN_TIMEOUT;
1114 		bp->serdes_an_pending = 1;
1115 		mod_timer(&bp->timer, jiffies + bp->current_interval);
1121 #define ETHTOOL_ALL_FIBRE_SPEED \
1122 (ADVERTISED_1000baseT_Full)
1124 #define ETHTOOL_ALL_COPPER_SPEED \
1125 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1126 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1127 ADVERTISED_1000baseT_Full)
1129 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1130 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1132 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
/* Configure the copper PHY.  Autoneg path: rebuild the 10/100 and
 * 1000BASE-T advertisement registers (plus pause bits) from
 * bp->advertising and restart autoneg only if something changed;
 * otherwise just re-resolve flow control.  Forced path: program BMCR
 * with the requested speed/duplex, bouncing the link first if it was
 * up so the partner re-trains. */
1135 bnx2_setup_copper_phy(struct bnx2 *bp)
1140 	bnx2_read_phy(bp, MII_BMCR, &bmcr);
1142 	if (bp->autoneg & AUTONEG_SPEED) {
1143 		u32 adv_reg, adv1000_reg;
1144 		u32 new_adv_reg = 0;
1145 		u32 new_adv1000_reg = 0;
1147 		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
/* keep only the bits we manage so the comparison below is meaningful */
1148 		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1149 			ADVERTISE_PAUSE_ASYM);
1151 		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1152 		adv1000_reg &= PHY_ALL_1000_SPEED;
1154 		if (bp->advertising & ADVERTISED_10baseT_Half)
1155 			new_adv_reg |= ADVERTISE_10HALF;
1156 		if (bp->advertising & ADVERTISED_10baseT_Full)
1157 			new_adv_reg |= ADVERTISE_10FULL;
1158 		if (bp->advertising & ADVERTISED_100baseT_Half)
1159 			new_adv_reg |= ADVERTISE_100HALF;
1160 		if (bp->advertising & ADVERTISED_100baseT_Full)
1161 			new_adv_reg |= ADVERTISE_100FULL;
1162 		if (bp->advertising & ADVERTISED_1000baseT_Full)
1163 			new_adv1000_reg |= ADVERTISE_1000FULL;
1165 		new_adv_reg |= ADVERTISE_CSMA;
1167 		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1169 		if ((adv1000_reg != new_adv1000_reg) ||
1170 			(adv_reg != new_adv_reg) ||
1171 			((bmcr & BMCR_ANENABLE) == 0)) {
1173 			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1174 			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1175 			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1178 		else if (bp->link_up) {
1179 			/* Flow ctrl may have changed from auto to forced */
1180 			/* or vice-versa. */
1182 			bnx2_resolve_flow_ctrl(bp);
1183 			bnx2_set_mac_link(bp);
/* forced-speed path (new_bmcr initialization elided) */
1189 	if (bp->req_line_speed == SPEED_100) {
1190 		new_bmcr |= BMCR_SPEED100;
1192 	if (bp->req_duplex == DUPLEX_FULL) {
1193 		new_bmcr |= BMCR_FULLDPLX;
1195 	if (new_bmcr != bmcr) {
/* double BMSR read clears latched link status before checking it */
1198 		bnx2_read_phy(bp, MII_BMSR, &bmsr);
1199 		bnx2_read_phy(bp, MII_BMSR, &bmsr);
1201 		if (bmsr & BMSR_LSTATUS) {
1202 			/* Force link down */
1203 			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1204 			spin_unlock_bh(&bp->phy_lock);
1206 			spin_lock_bh(&bp->phy_lock);
1208 			bnx2_read_phy(bp, MII_BMSR, &bmsr);
1209 			bnx2_read_phy(bp, MII_BMSR, &bmsr);
1212 		bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1214 		/* Normally, the new speed is setup after the link has
1215 		 * gone down and up again. In some cases, link will not go
1216 		 * down so we need to set up the new speed here.
1218 		if (bmsr & BMSR_LSTATUS) {
1219 			bp->line_speed = bp->req_line_speed;
1220 			bp->duplex = bp->req_duplex;
1221 			bnx2_resolve_flow_ctrl(bp);
1222 			bnx2_set_mac_link(bp);
/* Dispatch PHY setup to the SerDes or copper implementation; a no-op
 * (early return, elided) when in MAC loopback mode. */
1229 bnx2_setup_phy(struct bnx2 *bp)
1231 	if (bp->loopback == MAC_LOOPBACK)
1234 	if (bp->phy_flags & PHY_SERDES_FLAG) {
1235 		return (bnx2_setup_serdes_phy(bp));
1238 		return (bnx2_setup_copper_phy(bp));
/* One-time init for the 5708 SerDes PHY: select IEEE-compatible digital
 * block behavior, enable fiber mode + auto-detect and parallel detect,
 * advertise 2.5G if capable, apply TX-amplitude workarounds for early
 * chip revisions, and apply the shared-config TX control value on
 * backplane designs. */
1243 bnx2_init_5708s_phy(struct bnx2 *bp)
1247 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1248 	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1249 	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1251 	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1252 	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1253 	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1255 	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1256 	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1257 	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1259 	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1260 		bnx2_read_phy(bp, BCM5708S_UP1, &val);
1261 		val |= BCM5708S_UP1_2G5;
1262 		bnx2_write_phy(bp, BCM5708S_UP1, val);
1265 	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1266 	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1267 	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1268 		/* increase tx signal amplitude */
1269 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1270 			       BCM5708S_BLK_ADDR_TX_MISC);
1271 		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1272 		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1273 		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1274 		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
/* board-specific TX control value comes from shared NVRAM config */
1277 	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1278 	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1283 		is_backplane = REG_RD_IND(bp, bp->shmem_base +
1284 			BNX2_SHARED_HW_CFG_CONFIG);
1285 		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1286 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1287 				       BCM5708S_BLK_ADDR_TX_MISC);
1288 			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1289 			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1290 				       BCM5708S_BLK_ADDR_DIG);
/* One-time init for the 5706 SerDes PHY: clear parallel-detect state,
 * set a GP hardware control value on 5706, and program the
 * extended-packet-length / shadow registers according to whether
 * jumbo frames (mtu > 1500) are in use.  Registers 0x18/0x1c are
 * vendor shadow registers accessed by write-then-read-modify-write. */
1297 bnx2_init_5706s_phy(struct bnx2 *bp)
1299 	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1301 	if (CHIP_NUM(bp) == CHIP_NUM_5706)
1302         	REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1304 	if (bp->dev->mtu > 1500) {
1307 		/* Set extended packet length bit */
1308 		bnx2_write_phy(bp, 0x18, 0x7);
1309 		bnx2_read_phy(bp, 0x18, &val);
1310 		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1312 		bnx2_write_phy(bp, 0x1c, 0x6c00);
1313 		bnx2_read_phy(bp, 0x1c, &val);
1314 		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
/* standard-mtu path: clear the extended-length bits again */
1319 		bnx2_write_phy(bp, 0x18, 0x7);
1320 		bnx2_read_phy(bp, 0x18, &val);
1321 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
1323 		bnx2_write_phy(bp, 0x1c, 0x6c00);
1324 		bnx2_read_phy(bp, 0x1c, &val);
1325 		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
/* One-time init for the copper PHY: apply the CRC-fix register
 * sequence when flagged, configure extended packet length for jumbo
 * frames, and enable ethernet@wirespeed (link at lower speed when the
 * partner/cabling can't do gigabit).  0x15/0x17/0x18 are vendor
 * shadow/expansion registers; the magic values are Broadcom-specified.
 */
1332 bnx2_init_copper_phy(struct bnx2 *bp)
1336 		bp->phy_flags |= PHY_CRC_FIX_FLAG;
1338 	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1339 		bnx2_write_phy(bp, 0x18, 0x0c00);
1340 		bnx2_write_phy(bp, 0x17, 0x000a);
1341 		bnx2_write_phy(bp, 0x15, 0x310b);
1342 		bnx2_write_phy(bp, 0x17, 0x201f);
1343 		bnx2_write_phy(bp, 0x15, 0x9506);
1344 		bnx2_write_phy(bp, 0x17, 0x401f);
1345 		bnx2_write_phy(bp, 0x15, 0x14e2);
1346 		bnx2_write_phy(bp, 0x18, 0x0400);
1349 	if (bp->dev->mtu > 1500) {
1350 		/* Set extended packet length bit */
1351 		bnx2_write_phy(bp, 0x18, 0x7);
1352 		bnx2_read_phy(bp, 0x18, &val);
1353 		bnx2_write_phy(bp, 0x18, val | 0x4000);
1355 		bnx2_read_phy(bp, 0x10, &val);
1356 		bnx2_write_phy(bp, 0x10, val | 0x1);
/* standard-mtu path: clear the extended-length configuration */
1359 		bnx2_write_phy(bp, 0x18, 0x7);
1360 		bnx2_read_phy(bp, 0x18, &val);
1361 		bnx2_write_phy(bp, 0x18, val & ~0x4007);
1363 		bnx2_read_phy(bp, 0x10, &val);
1364 		bnx2_write_phy(bp, 0x10, val & ~0x1);
1367 	/* ethernet@wirespeed */
1368 	bnx2_write_phy(bp, 0x18, 0x7007);
1369 	bnx2_read_phy(bp, 0x18, &val);
1370 	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
/* Top-level PHY init: select link-ready interrupt mode, enable link
 * attention in the EMAC, read the 32-bit PHY ID from PHYSID1/PHYSID2,
 * then run the chip-specific init (5706S/5708S SerDes or copper). */
1376 bnx2_init_phy(struct bnx2 *bp)
1381 	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1382 	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1384         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
/* PHY id: high 16 bits from PHYSID1, low 16 from PHYSID2 */
1388 	bnx2_read_phy(bp, MII_PHYSID1, &val);
1389 	bp->phy_id = val << 16;
1390 	bnx2_read_phy(bp, MII_PHYSID2, &val);
1391 	bp->phy_id |= val & 0xffff;
1393 	if (bp->phy_flags & PHY_SERDES_FLAG) {
1394 		if (CHIP_NUM(bp) == CHIP_NUM_5706)
1395 			rc = bnx2_init_5706s_phy(bp);
1396 		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1397 			rc = bnx2_init_5708s_phy(bp);
1400 		rc = bnx2_init_copper_phy(bp);
/* Put the EMAC into internal MAC loopback with forced link, clearing
 * the port-mode field first (used by the loopback self-test path). */
1409 bnx2_set_mac_loopback(struct bnx2 *bp)
1413 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1414 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1415 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1416 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1421 static int bnx2_test_link(struct bnx2 *);
/* Put the PHY into loopback (BMCR write under phy_lock), poll up to 10
 * iterations for link, then configure the EMAC port mode for GMII with
 * loopback/force-link bits cleared.
 * NOTE(review): extraction gaps — BMCR write continuation and loop body
 * lines are elided; code kept verbatim.
 */
1424 bnx2_set_phy_loopback(struct bnx2 *bp)
1429 spin_lock_bh(&bp->phy_lock);
1430 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1432 spin_unlock_bh(&bp->phy_lock);
/* Wait (bounded) for the link state to settle in loopback. */
1436 for (i = 0; i < 10; i++) {
1437 if (bnx2_test_link(bp) == 0)
1442 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1443 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1444 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1445 BNX2_EMAC_MODE_25G_MODE);
1447 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1448 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox (BNX2_DRV_MB) and wait for the firmware to echo the
 * sequence number back in BNX2_FW_MB. On timeout, write a FW_TIMEOUT
 * code back to the firmware (and print unless @silent).
 * NOTE(review): extraction gaps — loop delays/returns elided; kept verbatim.
 */
1454 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1460 msg_data |= bp->fw_wr_seq;
1462 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1464 /* wait for an acknowledgement. */
1465 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1468 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
/* Firmware acks by reflecting the driver's sequence number. */
1470 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1473 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1476 /* If we timed out, inform the firmware that this is the case. */
1477 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1479 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1482 msg_data &= ~BNX2_DRV_MSG_CODE;
1483 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1485 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1490 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
/* Program the 5709's host-based context memory: enable the context
 * block, then write each context page's DMA address (low/high halves)
 * into the host page table, polling for each WRITE_REQ to clear.
 * NOTE(review): extraction gaps — delays/returns elided; kept verbatim.
 */
1497 bnx2_init_5709_context(struct bnx2 *bp)
1502 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
1503 val |= (BCM_PAGE_BITS - 8) << 16;
1504 REG_WR(bp, BNX2_CTX_COMMAND, val);
1505 for (i = 0; i < bp->ctx_pages; i++) {
/* DATA0 carries the low 32 bits + VALID; DATA1 the high 32 bits. */
1508 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1509 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1510 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1511 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1512 (u64) bp->ctx_blk_mapping[i] >> 32);
1513 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1514 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
/* Bounded poll for the write request to complete. */
1515 for (j = 0; j < 10; j++) {
1517 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1518 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1522 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
/* Initialize on-chip context memory: for each virtual CID compute the
 * physical CID address (5706 A0 uses a remapped new_vcid), zero the
 * context area through the CTX_VIRT_ADDR/CTX_PAGE_TBL window, and map
 * the vcid to its pcid.
 * NOTE(review): extraction gaps — outer loop header and branch lines
 * elided; code kept verbatim.
 */
1531 bnx2_init_context(struct bnx2 *bp)
1537 u32 vcid_addr, pcid_addr, offset;
1541 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1544 vcid_addr = GET_PCID_ADDR(vcid);
/* 5706 A0 remaps the vcid (bit 3 dropped, 0x60 base added). */
1546 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1551 pcid_addr = GET_PCID_ADDR(new_vcid);
1554 vcid_addr = GET_CID_ADDR(vcid);
1555 pcid_addr = vcid_addr;
1558 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1559 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1561 /* Zero out the context. */
1562 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1563 CTX_WR(bp, 0x00, offset, 0);
1566 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1567 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
/* Work around bad RX buffer memory: allocate every free mbuf from the
 * chip's RX buffer pool, remember the good ones (bit 9 clear) in a
 * kmalloc'd array of up to 512 u16 entries, then free only the good
 * ones back — leaving the bad blocks permanently allocated.
 * NOTE(review): extraction gaps — counter increments, kfree and return
 * lines elided; code kept verbatim.
 */
1572 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1578 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1579 if (good_mbuf == NULL) {
1580 printk(KERN_ERR PFX "Failed to allocate memory in "
1581 "bnx2_alloc_bad_rbuf\n");
1585 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1586 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1590 /* Allocate a bunch of mbufs and save the good ones in an array. */
1591 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1592 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1593 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1595 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1597 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1599 /* The addresses with Bit 9 set are bad memory blocks. */
1600 if (!(val & (1 << 9))) {
1601 good_mbuf[good_mbuf_cnt] = (u16) val;
1605 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1608 /* Free the good ones back to the mbuf pool thus discarding
1609 * all the bad ones. */
1610 while (good_mbuf_cnt) {
1613 val = good_mbuf[good_mbuf_cnt];
/* Encode the free command value from the saved mbuf cluster. */
1614 val = (val << 9) | val | 1;
1616 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
/* Program the device's unicast MAC address into the EMAC perfect-match
 * registers: bytes 0-1 in MATCH0, bytes 2-5 in MATCH1. */
1623 bnx2_set_mac_addr(struct bnx2 *bp)
1626 u8 *mac_addr = bp->dev->dev_addr;
1628 val = (mac_addr[0] << 8) | mac_addr[1];
1630 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1632 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1633 (mac_addr[4] << 8) | mac_addr[5];
1635 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
/* Allocate and DMA-map a new RX skb for ring slot @index, align the
 * data pointer to BNX2_RX_ALIGN, fill in the rx_bd DMA address halves,
 * and advance rx_prod_bseq.
 * NOTE(review): extraction gaps — NULL-check / return lines elided;
 * code kept verbatim.
 */
1639 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1641 struct sk_buff *skb;
1642 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1644 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1645 unsigned long align;
1647 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
1652 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1653 skb_reserve(skb, BNX2_RX_ALIGN - align);
1655 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1656 PCI_DMA_FROMDEVICE);
1659 pci_unmap_addr_set(rx_buf, mapping, mapping);
1661 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1662 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1664 bp->rx_prod_bseq += bp->rx_buf_use_size;
/* Handle a PHY/link attention: compare the link-state attention bit
 * against its ack bit in the status block and, on change, acknowledge
 * it via the PCICFG status set/clear command registers.
 * NOTE(review): extraction gaps — the link-(re)settling call(s) after
 * the ack are elided; code kept verbatim.
 */
1670 bnx2_phy_int(struct bnx2 *bp)
1672 u32 new_link_state, old_link_state;
1674 new_link_state = bp->status_blk->status_attn_bits &
1675 STATUS_ATTN_BITS_LINK_STATE;
1676 old_link_state = bp->status_blk->status_attn_bits_ack &
1677 STATUS_ATTN_BITS_LINK_STATE;
1678 if (new_link_state != old_link_state) {
1679 if (new_link_state) {
1680 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1681 STATUS_ATTN_BITS_LINK_STATE);
1684 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1685 STATUS_ATTN_BITS_LINK_STATE);
/* TX completion processing: walk the software consumer index up to the
 * hardware consumer from the status block, unmap and free each
 * completed skb (head + fragments), then update bp->tx_cons and wake
 * the queue if it was stopped and enough descriptors are now free.
 * NOTE(review): extraction gaps — several lines (skb assignment,
 * dev_kfree_skb, loop/brace closures) are elided; code kept verbatim.
 */
1692 bnx2_tx_int(struct bnx2 *bp)
1694 struct status_block *sblk = bp->status_blk;
1695 u16 hw_cons, sw_cons, sw_ring_cons;
1698 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
/* Skip the "next page" BD slot at the end of each ring page. */
1699 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1702 sw_cons = bp->tx_cons;
1704 while (sw_cons != hw_cons) {
1705 struct sw_bd *tx_buf;
1706 struct sk_buff *skb;
1709 sw_ring_cons = TX_RING_IDX(sw_cons);
1711 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1714 /* partial BD completions possible with TSO packets */
1715 if (skb_is_gso(skb)) {
1716 u16 last_idx, last_ring_idx;
1718 last_idx = sw_cons +
1719 skb_shinfo(skb)->nr_frags + 1;
1720 last_ring_idx = sw_ring_cons +
1721 skb_shinfo(skb)->nr_frags + 1;
1722 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
/* If the last BD of this TSO packet is not yet completed, stop here. */
1725 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1730 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1731 skb_headlen(skb), PCI_DMA_TODEVICE);
1734 last = skb_shinfo(skb)->nr_frags;
1736 for (i = 0; i < last; i++) {
1737 sw_cons = NEXT_TX_BD(sw_cons);
1739 pci_unmap_page(bp->pdev,
1741 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1743 skb_shinfo(skb)->frags[i].size,
1747 sw_cons = NEXT_TX_BD(sw_cons);
1749 tx_free_bd += last + 1;
/* Re-read the hardware consumer: more completions may have arrived. */
1753 hw_cons = bp->hw_tx_cons =
1754 sblk->status_tx_quick_consumer_index0;
1756 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1761 bp->tx_cons = sw_cons;
1762 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1763 * before checking for netif_queue_stopped(). Without the
1764 * memory barrier, there is a small possibility that bnx2_start_xmit()
1765 * will miss it and cause the queue to be stopped forever.
/* Double-check under netif_tx_lock to avoid a wake/stop race. */
1769 if (unlikely(netif_queue_stopped(bp->dev)) &&
1770 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1771 netif_tx_lock(bp->dev);
1772 if ((netif_queue_stopped(bp->dev)) &&
1773 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
1774 netif_wake_queue(bp->dev);
1775 netif_tx_unlock(bp->dev);
/* Recycle an RX skb: move the skb and its DMA mapping from the consumer
 * slot to the producer slot (sync the header area for the device first)
 * and copy the BD address fields across, so the buffer is re-posted
 * without reallocation. */
1780 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1783 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1784 struct rx_bd *cons_bd, *prod_bd;
1786 cons_rx_buf = &bp->rx_buf_ring[cons];
1787 prod_rx_buf = &bp->rx_buf_ring[prod];
/* Only the header area (rx_offset + RX_COPY_THRESH) needs syncing. */
1789 pci_dma_sync_single_for_device(bp->pdev,
1790 pci_unmap_addr(cons_rx_buf, mapping),
1791 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1793 bp->rx_prod_bseq += bp->rx_buf_use_size;
1795 prod_rx_buf->skb = skb;
1800 pci_unmap_addr_set(prod_rx_buf, mapping,
1801 pci_unmap_addr(cons_rx_buf, mapping));
1803 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1804 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1805 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1806 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
/* RX processing (NAPI budgeted): walk the ring from the software
 * consumer to the hardware consumer; for each packet check the l2_fhdr
 * error bits, copy small packets into a fresh skb (jumbo-MTU case) or
 * hand off the full buffer after allocating a replacement, deliver via
 * VLAN accel or netif_receive_skb, then publish new producer index and
 * byte sequence to the chip mailboxes. Returns the number of packets
 * processed (rx_pkt) — return line elided in this extraction.
 * NOTE(review): extraction gaps — skb assignment, error-path reuse,
 * rx_pkt increments and brace closures are elided; code kept verbatim.
 */
1810 bnx2_rx_int(struct bnx2 *bp, int budget)
1812 struct status_block *sblk = bp->status_blk;
1813 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1814 struct l2_fhdr *rx_hdr;
1817 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
/* Skip the "next page" BD slot at the end of each ring page. */
1818 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1821 sw_cons = bp->rx_cons;
1822 sw_prod = bp->rx_prod;
1824 /* Memory barrier necessary as speculative reads of the rx
1825 * buffer can be ahead of the index in the status block
1828 while (sw_cons != hw_cons) {
1831 struct sw_bd *rx_buf;
1832 struct sk_buff *skb;
1833 dma_addr_t dma_addr;
1835 sw_ring_cons = RX_RING_IDX(sw_cons);
1836 sw_ring_prod = RX_RING_IDX(sw_prod);
1838 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1843 dma_addr = pci_unmap_addr(rx_buf, mapping);
1845 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
1846 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
/* The l2 frame header written by the chip precedes the packet data. */
1848 rx_hdr = (struct l2_fhdr *) skb->data;
1849 len = rx_hdr->l2_fhdr_pkt_len - 4;
1851 if ((status = rx_hdr->l2_fhdr_status) &
1852 (L2_FHDR_ERRORS_BAD_CRC |
1853 L2_FHDR_ERRORS_PHY_DECODE |
1854 L2_FHDR_ERRORS_ALIGNMENT |
1855 L2_FHDR_ERRORS_TOO_SHORT |
1856 L2_FHDR_ERRORS_GIANT_FRAME)) {
1861 /* Since we don't have a jumbo ring, copy small packets
1864 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1865 struct sk_buff *new_skb;
1867 new_skb = netdev_alloc_skb(bp->dev, len + 2);
1868 if (new_skb == NULL)
1872 memcpy(new_skb->data,
1873 skb->data + bp->rx_offset - 2,
1876 skb_reserve(new_skb, 2);
1877 skb_put(new_skb, len);
/* Original buffer goes back onto the ring; deliver the copy. */
1879 bnx2_reuse_rx_skb(bp, skb,
1880 sw_ring_cons, sw_ring_prod);
1884 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
1885 pci_unmap_single(bp->pdev, dma_addr,
1886 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1888 skb_reserve(skb, bp->rx_offset);
/* Replacement alloc failed: recycle the buffer, drop the packet. */
1893 bnx2_reuse_rx_skb(bp, skb,
1894 sw_ring_cons, sw_ring_prod);
1898 skb->protocol = eth_type_trans(skb, bp->dev);
/* 0x8100 = ETH_P_8021Q; oversize non-VLAN frames are dropped. */
1900 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
1901 (ntohs(skb->protocol) != 0x8100)) {
1908 skb->ip_summed = CHECKSUM_NONE;
1910 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1911 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1913 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1914 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
1915 skb->ip_summed = CHECKSUM_UNNECESSARY;
1919 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1920 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1921 rx_hdr->l2_fhdr_vlan_tag);
1925 netif_receive_skb(skb);
1927 bp->dev->last_rx = jiffies;
1931 sw_cons = NEXT_RX_BD(sw_cons);
1932 sw_prod = NEXT_RX_BD(sw_prod);
1934 if ((rx_pkt == budget))
1937 /* Refresh hw_cons to see if there is new work */
1938 if (sw_cons == hw_cons) {
1939 hw_cons = bp->hw_rx_cons =
1940 sblk->status_rx_quick_consumer_index0;
1941 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1946 bp->rx_cons = sw_cons;
1947 bp->rx_prod = sw_prod;
/* Tell the chip about the new producer index and byte sequence. */
1949 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1951 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1959 /* MSI ISR - The only difference between this and the INTx ISR
1960 * is that the MSI interrupt is always serviced.
/* Mask further interrupts, bail if interrupts are soft-disabled
 * (intr_sem), otherwise schedule NAPI polling. */
1963 bnx2_msi(int irq, void *dev_instance)
1965 struct net_device *dev = dev_instance;
1966 struct bnx2 *bp = netdev_priv(dev);
1968 prefetch(bp->status_blk);
1969 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1970 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1971 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1973 /* Return here if interrupt is disabled. */
1974 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1977 netif_rx_schedule(dev);
/* Shared INTx ISR: if the status block index is unchanged AND the INTA
 * line is not asserted for us, the interrupt is not ours — return
 * (IRQ_NONE path elided in this extraction). Otherwise mask interrupts
 * and schedule NAPI polling. */
1983 bnx2_interrupt(int irq, void *dev_instance)
1985 struct net_device *dev = dev_instance;
1986 struct bnx2 *bp = netdev_priv(dev);
1988 /* When using INTx, it is possible for the interrupt to arrive
1989 * at the CPU before the status block posted prior to the
1990 * interrupt. Reading a register will flush the status block.
1991 * When using MSI, the MSI message will always complete after
1992 * the status block write.
1994 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
1995 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1996 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
1999 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2000 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2001 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2003 /* Return here if interrupt is shared and is disabled. */
2004 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2007 netif_rx_schedule(dev);
/* Return whether there is pending RX/TX work (status block consumer
 * indices differ from our cached ones) or a link-state change (attn
 * bit vs. its ack — comparison continuation elided in extraction). */
2013 bnx2_has_work(struct bnx2 *bp)
2015 struct status_block *sblk = bp->status_blk;
2017 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2018 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2021 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
/* NAPI poll: handle link attention (under phy_lock), service TX
 * completions (call elided in extraction) and RX within the quota,
 * then — if no work remains — complete NAPI and re-enable interrupts
 * via the INT_ACK_CMD register (MSI vs INTx need different sequences).
 * NOTE(review): extraction gaps — bnx2_phy_int/bnx2_tx_int calls and
 * return statements are elided; code kept verbatim.
 */
2029 bnx2_poll(struct net_device *dev, int *budget)
2031 struct bnx2 *bp = netdev_priv(dev);
2033 if ((bp->status_blk->status_attn_bits &
2034 STATUS_ATTN_BITS_LINK_STATE) !=
2035 (bp->status_blk->status_attn_bits_ack &
2036 STATUS_ATTN_BITS_LINK_STATE)) {
2038 spin_lock(&bp->phy_lock);
2040 spin_unlock(&bp->phy_lock);
2042 /* This is needed to take care of transient status
2043 * during link changes.
2045 REG_WR(bp, BNX2_HC_COMMAND,
2046 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2047 REG_RD(bp, BNX2_HC_COMMAND);
2050 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2053 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2054 int orig_budget = *budget;
/* Never process more than this device's remaining quota. */
2057 if (orig_budget > dev->quota)
2058 orig_budget = dev->quota;
2060 work_done = bnx2_rx_int(bp, orig_budget);
2061 *budget -= work_done;
2062 dev->quota -= work_done;
2065 bp->last_status_idx = bp->status_blk->status_idx;
2068 if (!bnx2_has_work(bp)) {
2069 netif_rx_complete(dev);
/* MSI: one unmask write. INTx (else arm, header elided): first
 * ack with MASK_INT, then unmask — two writes. */
2070 if (likely(bp->flags & USING_MSI_FLAG)) {
2071 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2072 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2073 bp->last_status_idx);
2076 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2077 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2078 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2079 bp->last_status_idx);
2081 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2082 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2083 bp->last_status_idx);
2090 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2091 * from set_multicast.
/* Program RX filtering: promiscuous / all-multicast / multicast hash
 * per dev->flags, VLAN tag keeping per vlgrp and ASF state, then commit
 * EMAC_RX_MODE and the RPM sort-user registers.
 * NOTE(review): extraction gaps — some else/brace lines elided; kept
 * verbatim.
 */
2094 bnx2_set_rx_mode(struct net_device *dev)
2096 struct bnx2 *bp = netdev_priv(dev);
2097 u32 rx_mode, sort_mode;
2100 spin_lock_bh(&bp->phy_lock);
2102 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2103 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2104 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2106 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2107 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2109 if (!(bp->flags & ASF_ENABLE_FLAG))
2110 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2112 if (dev->flags & IFF_PROMISC) {
2113 /* Promiscuous mode. */
2114 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2115 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2116 BNX2_RPM_SORT_USER0_PROM_VLAN;
2118 else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast: set every hash register (value elided). */
2119 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2120 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2123 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2126 /* Accept one or more multicast(s). */
2127 struct dev_mc_list *mclist;
2128 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2133 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2135 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2136 i++, mclist = mclist->next) {
/* Hash each address into one bit of the 256-bit filter. */
2138 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2140 regidx = (bit & 0xe0) >> 5;
2142 mc_filter[regidx] |= (1 << bit);
2145 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2146 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2150 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2153 if (rx_mode != bp->rx_mode) {
2154 bp->rx_mode = rx_mode;
2155 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
/* Disable, program, then re-enable the sort-user filter. */
2158 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2159 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2160 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2162 spin_unlock_bh(&bp->phy_lock);
2165 #define FW_BUF_SIZE 0x8000
/* Allocate the firmware decompression resources: a vmalloc'd output
 * buffer (FW_BUF_SIZE), a z_stream, and its inflate workspace. The
 * error unwinding path frees the buffer and prints a diagnostic
 * (intermediate goto labels elided in this extraction). */
2168 bnx2_gunzip_init(struct bnx2 *bp)
2170 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2173 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2176 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2177 if (bp->strm->workspace == NULL)
2187 vfree(bp->gunzip_buf);
2188 bp->gunzip_buf = NULL;
2191 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2192 "uncompression.\n", bp->dev->name);
/* Free the decompression resources allocated by bnx2_gunzip_init
 * (workspace, stream — kfree of bp->strm elided in extraction — and
 * the output buffer). */
2197 bnx2_gunzip_end(struct bnx2 *bp)
2199 kfree(bp->strm->workspace);
2204 if (bp->gunzip_buf) {
2205 vfree(bp->gunzip_buf);
2206 bp->gunzip_buf = NULL;
/* Inflate a gzip-wrapped firmware image @zbuf of @len bytes into
 * bp->gunzip_buf: validate the gzip magic, skip the optional FNAME
 * field, then raw-inflate (negative windowBits skips the zlib header).
 * On success *outbuf/*outlen describe the decompressed image.
 * NOTE(review): extraction gaps — header-size init of n and return
 * statements elided; code kept verbatim.
 */
2211 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2215 /* check gzip header */
2216 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
/* Skip the NUL-terminated original-filename field if present. */
2222 if (zbuf[3] & FNAME)
2223 while ((zbuf[n++] != 0) && (n < len));
2225 bp->strm->next_in = zbuf + n;
2226 bp->strm->avail_in = len - n;
2227 bp->strm->next_out = bp->gunzip_buf;
2228 bp->strm->avail_out = FW_BUF_SIZE;
2230 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2234 rc = zlib_inflate(bp->strm, Z_FINISH);
2236 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2237 *outbuf = bp->gunzip_buf;
2239 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2240 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2241 bp->dev->name, bp->strm->msg);
2243 zlib_inflateEnd(bp->strm);
2245 if (rc == Z_STREAM_END)
/* Load RV2P processor firmware: write each 8-byte instruction pair
 * through INSTR_HIGH/INSTR_LOW and commit it with the appropriate
 * PROC1/PROC2 address command, then reset the chosen processor
 * (un-stall happens later elsewhere). */
2252 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2259 for (i = 0; i < rv2p_code_len; i += 8) {
/* Pointer increments between the two writes are elided in this
 * extraction; HIGH then LOW word of each instruction. */
2260 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2262 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2265 if (rv2p_proc == RV2P_PROC1) {
2266 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2267 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2270 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2271 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2275 /* Reset the processor, un-stall is done later. */
2276 if (rv2p_proc == RV2P_PROC1) {
2277 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2280 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
/* Load firmware into an on-chip CPU described by @cpu_reg: halt the
 * CPU, copy each section (text is gunzip'd first; data/sbss/bss/rodata
 * written directly) into scratchpad space via indirect writes, clear
 * the prefetch instruction, set the PC, and un-halt.
 * NOTE(review): extraction gaps — section-presence guards and the
 * return statement are elided; code kept verbatim.
 */
2285 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2292 val = REG_RD_IND(bp, cpu_reg->mode);
2293 val |= cpu_reg->mode_value_halt;
2294 REG_WR_IND(bp, cpu_reg->mode, val);
2295 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2297 /* Load the Text area. */
2298 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2303 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2313 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2314 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2318 /* Load the Data area. */
2319 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2323 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2324 REG_WR_IND(bp, offset, fw->data[j]);
2328 /* Load the SBSS area. */
2329 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2333 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2334 REG_WR_IND(bp, offset, fw->sbss[j]);
2338 /* Load the BSS area. */
2339 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2343 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2344 REG_WR_IND(bp, offset, fw->bss[j]);
2348 /* Load the Read-Only area. */
2349 offset = cpu_reg->spad_base +
2350 (fw->rodata_addr - cpu_reg->mips_view_base);
2354 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2355 REG_WR_IND(bp, offset, fw->rodata[j]);
2359 /* Clear the pre-fetch instruction. */
2360 REG_WR_IND(bp, cpu_reg->inst, 0);
2361 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2363 /* Start the CPU. */
2364 val = REG_RD_IND(bp, cpu_reg->mode);
2365 val &= ~cpu_reg->mode_value_halt;
2366 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2367 REG_WR_IND(bp, cpu_reg->mode, val);
/* Initialize all on-chip processors: set up the gunzip state, load both
 * RV2P processor images, then load firmware into the RX, TX, TX
 * patch-up (TPAT) and Completion (COM) CPUs — each with its own
 * register map filled into the shared cpu_reg struct.
 * NOTE(review): extraction gaps — error-goto lines after each
 * load_cpu_fw call and the final return are elided; kept verbatim.
 */
2373 bnx2_init_cpus(struct bnx2 *bp)
2375 struct cpu_reg cpu_reg;
2381 if ((rc = bnx2_gunzip_init(bp)) != 0)
2384 /* Initialize the RV2P processor. */
2385 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2390 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2392 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2397 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2399 /* Initialize the RX Processor. */
2400 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2401 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2402 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2403 cpu_reg.state = BNX2_RXP_CPU_STATE;
2404 cpu_reg.state_value_clear = 0xffffff;
2405 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2406 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2407 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2408 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2409 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2410 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2411 cpu_reg.mips_view_base = 0x8000000;
2413 fw = &bnx2_rxp_fw_06;
2415 rc = load_cpu_fw(bp, &cpu_reg, fw);
2419 /* Initialize the TX Processor. */
2420 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2421 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2422 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2423 cpu_reg.state = BNX2_TXP_CPU_STATE;
2424 cpu_reg.state_value_clear = 0xffffff;
2425 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2426 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2427 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2428 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2429 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2430 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2431 cpu_reg.mips_view_base = 0x8000000;
2433 fw = &bnx2_txp_fw_06;
2435 rc = load_cpu_fw(bp, &cpu_reg, fw);
2439 /* Initialize the TX Patch-up Processor. */
2440 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2441 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2442 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2443 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2444 cpu_reg.state_value_clear = 0xffffff;
2445 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2446 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2447 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2448 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2449 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2450 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2451 cpu_reg.mips_view_base = 0x8000000;
2453 fw = &bnx2_tpat_fw_06;
2455 rc = load_cpu_fw(bp, &cpu_reg, fw);
2459 /* Initialize the Completion Processor. */
2460 cpu_reg.mode = BNX2_COM_CPU_MODE;
2461 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2462 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2463 cpu_reg.state = BNX2_COM_CPU_STATE;
2464 cpu_reg.state_value_clear = 0xffffff;
2465 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2466 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2467 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2468 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2469 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2470 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2471 cpu_reg.mips_view_base = 0x8000000;
2473 fw = &bnx2_com_fw_06;
2475 rc = load_cpu_fw(bp, &cpu_reg, fw);
2480 bnx2_gunzip_end(bp);
/* PCI power-state transition. D0 path: clear PM state, restore normal
 * EMAC/RPM config. D3hot path (branch headers elided in extraction):
 * if WOL is enabled, force autoneg to 10/100, enable magic/ACPI packet
 * reception, accept all multicast, enable EMAC+RPM blocks, notify the
 * firmware (SUSPEND_WOL vs SUSPEND_NO_WOL), then write the new PM
 * state — with PME enable — into PCI_PM_CTRL.
 * NOTE(review): extraction gaps — switch/case structure, 5706 A0/A1
 * workaround body and returns are elided; code kept verbatim.
 */
2485 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2489 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2495 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2496 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2497 PCI_PM_CTRL_PME_STATUS);
2499 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2500 /* delay required during transition out of D3hot */
2503 val = REG_RD(bp, BNX2_EMAC_MODE);
2504 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2505 val &= ~BNX2_EMAC_MODE_MPKT;
2506 REG_WR(bp, BNX2_EMAC_MODE, val);
2508 val = REG_RD(bp, BNX2_RPM_CONFIG);
2509 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2510 REG_WR(bp, BNX2_RPM_CONFIG, val);
/* WOL: temporarily force 10/100 autoneg for the suspended link. */
2521 autoneg = bp->autoneg;
2522 advertising = bp->advertising;
2524 bp->autoneg = AUTONEG_SPEED;
2525 bp->advertising = ADVERTISED_10baseT_Half |
2526 ADVERTISED_10baseT_Full |
2527 ADVERTISED_100baseT_Half |
2528 ADVERTISED_100baseT_Full |
2531 bnx2_setup_copper_phy(bp);
2533 bp->autoneg = autoneg;
2534 bp->advertising = advertising;
2536 bnx2_set_mac_addr(bp);
2538 val = REG_RD(bp, BNX2_EMAC_MODE);
2540 /* Enable port mode. */
2541 val &= ~BNX2_EMAC_MODE_PORT;
2542 val |= BNX2_EMAC_MODE_PORT_MII |
2543 BNX2_EMAC_MODE_MPKT_RCVD |
2544 BNX2_EMAC_MODE_ACPI_RCVD |
2545 BNX2_EMAC_MODE_MPKT;
2547 REG_WR(bp, BNX2_EMAC_MODE, val);
2549 /* receive all multicast */
2550 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2551 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2554 REG_WR(bp, BNX2_EMAC_RX_MODE,
2555 BNX2_EMAC_RX_MODE_SORT_MODE);
2557 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2558 BNX2_RPM_SORT_USER0_MC_EN;
2559 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2560 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2561 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2562 BNX2_RPM_SORT_USER0_ENA);
2564 /* Need to enable EMAC and RPM for WOL. */
2565 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2566 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2567 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2568 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2570 val = REG_RD(bp, BNX2_RPM_CONFIG);
2571 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2572 REG_WR(bp, BNX2_RPM_CONFIG, val);
2574 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2577 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2580 if (!(bp->flags & NO_WOL_FLAG))
2581 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2583 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
/* 5706 A0/A1 have a PM-state quirk; workaround body elided here. */
2584 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2585 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2594 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2596 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2599 /* No more memory access after this point until
2600 * device is brought back to D0.
/* Acquire the NVRAM hardware arbitration lock (arb slot 2): request it
 * and poll until granted, returning an error (value elided in this
 * extraction) on timeout. */
2612 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2617 /* Request access to the flash interface. */
2618 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2619 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2620 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2621 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2627 if (j >= NVRAM_TIMEOUT_COUNT)
/* Release the NVRAM arbitration lock (arb slot 2): issue the clear
 * request and poll until the grant bit drops; timeout is an error. */
2634 bnx2_release_nvram_lock(struct bnx2 *bp)
2639 /* Relinquish nvram interface. */
2640 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2642 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2643 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2644 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2650 if (j >= NVRAM_TIMEOUT_COUNT)
/* Enable NVRAM writes: set the PCI write-enable in MISC_CFG and, for
 * non-buffered flash parts, issue an explicit WREN command and wait
 * for DONE. */
2658 bnx2_enable_nvram_write(struct bnx2 *bp)
2662 val = REG_RD(bp, BNX2_MISC_CFG);
2663 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2665 if (!bp->flash_info->buffered) {
2668 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2669 REG_WR(bp, BNX2_NVM_COMMAND,
2670 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2672 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2675 val = REG_RD(bp, BNX2_NVM_COMMAND);
2676 if (val & BNX2_NVM_COMMAND_DONE)
2680 if (j >= NVRAM_TIMEOUT_COUNT)
/* Disable NVRAM writes by clearing the write-enable bits in MISC_CFG. */
2687 bnx2_disable_nvram_write(struct bnx2 *bp)
2691 val = REG_RD(bp, BNX2_MISC_CFG);
2692 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
/* Enable host access to the NVRAM interface (read and write enable
 * bits set together, even for read-only use). */
2697 bnx2_enable_nvram_access(struct bnx2 *bp)
2701 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2702 /* Enable both bits, even on read. */
2703 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2704 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
/* Disable host access to the NVRAM interface (clears both the read and
 * write enable bits). */
2708 bnx2_disable_nvram_access(struct bnx2 *bp)
2712 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2713 /* Disable both bits, even after read. */
2714 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
2715 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2716 BNX2_NVM_ACCESS_ENABLE_WR_EN));
/* Erase one NVRAM page at @offset. Buffered flash needs no erase and
 * returns early (return value elided in extraction). Otherwise: clear
 * DONE, program the address, issue the ERASE command, and poll DONE
 * with a bounded timeout. */
2720 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2725 if (bp->flash_info->buffered)
2726 /* Buffered flash, no erase needed */
2729 /* Build an erase command */
2730 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2731 BNX2_NVM_COMMAND_DOIT;
2733 /* Need to clear DONE bit separately. */
2734 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2736 /* Address of the NVRAM to read from. */
2737 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2739 /* Issue an erase command. */
2740 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2742 /* Wait for completion. */
2743 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2748 val = REG_RD(bp, BNX2_NVM_COMMAND);
2749 if (val & BNX2_NVM_COMMAND_DONE)
2753 if (j >= NVRAM_TIMEOUT_COUNT)
/* Read one 32-bit word from NVRAM at @offset into @ret_val (as 4 raw
 * bytes, big-endian converted). For buffered flash the linear offset
 * is translated to a page/byte address first. Polls DONE with a
 * bounded timeout. */
2760 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2765 /* Build the command word. */
2766 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2768 /* Calculate an offset of a buffered flash. */
2769 if (bp->flash_info->buffered) {
2770 offset = ((offset / bp->flash_info->page_size) <<
2771 bp->flash_info->page_bits) +
2772 (offset % bp->flash_info->page_size);
2775 /* Need to clear DONE bit separately. */
2776 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2778 /* Address of the NVRAM to read from. */
2779 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2781 /* Issue a read command. */
2782 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2784 /* Wait for completion. */
2785 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2790 val = REG_RD(bp, BNX2_NVM_COMMAND);
2791 if (val & BNX2_NVM_COMMAND_DONE) {
2792 val = REG_RD(bp, BNX2_NVM_READ);
/* NVRAM data is big-endian; convert before copying out. */
2794 val = be32_to_cpu(val);
2795 memcpy(ret_val, &val, 4);
2799 if (j >= NVRAM_TIMEOUT_COUNT)
/* Write one 32-bit word (@val, 4 raw bytes, converted to big-endian)
 * to NVRAM at @offset. Buffered flash offsets are translated to
 * page/byte addressing. Polls DONE with a bounded timeout. */
2807 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2812 /* Build the command word. */
2813 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2815 /* Calculate an offset of a buffered flash. */
2816 if (bp->flash_info->buffered) {
2817 offset = ((offset / bp->flash_info->page_size) <<
2818 bp->flash_info->page_bits) +
2819 (offset % bp->flash_info->page_size);
2822 /* Need to clear DONE bit separately. */
2823 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
/* NVRAM expects big-endian data. */
2825 memcpy(&val32, val, 4);
2826 val32 = cpu_to_be32(val32);
2828 /* Write the data. */
2829 REG_WR(bp, BNX2_NVM_WRITE, val32);
2831 /* Address of the NVRAM to write to. */
2832 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2834 /* Issue the write command. */
2835 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2837 /* Wait for completion. */
2838 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2841 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2844 if (j >= NVRAM_TIMEOUT_COUNT)
/* Detect the attached flash/EEPROM: read NVM_CFG1 and match it against
 * flash_table either by backup-strap (if the interface was already
 * reconfigured, bit 30 set) or by strap mask; on a strap match,
 * reconfigure the NVM_CFG1-3/WRITE1 registers under the NVRAM lock.
 * Finally determine flash size from shared hw config (5709 path guard
 * elided) or the table entry.
 * NOTE(review): extraction gaps — loop increments and some braces are
 * elided; code kept verbatim.
 */
2851 bnx2_init_nvram(struct bnx2 *bp)
2854 int j, entry_count, rc;
2855 struct flash_spec *flash;
2857 /* Determine the selected interface. */
2858 val = REG_RD(bp, BNX2_NVM_CFG1);
2860 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
2863 if (val & 0x40000000) {
2865 /* Flash interface has been reconfigured */
2866 for (j = 0, flash = &flash_table[0]; j < entry_count;
2868 if ((val & FLASH_BACKUP_STRAP_MASK) ==
2869 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
2870 bp->flash_info = flash;
2877 /* Not yet been reconfigured */
2879 if (val & (1 << 23))
2880 mask = FLASH_BACKUP_STRAP_MASK;
2882 mask = FLASH_STRAP_MASK;
2884 for (j = 0, flash = &flash_table[0]; j < entry_count;
2887 if ((val & mask) == (flash->strapping & mask)) {
2888 bp->flash_info = flash;
2890 /* Request access to the flash interface. */
2891 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2894 /* Enable access to flash interface */
2895 bnx2_enable_nvram_access(bp);
2897 /* Reconfigure the flash interface */
2898 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
2899 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
2900 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
2901 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
2903 /* Disable access to flash interface */
2904 bnx2_disable_nvram_access(bp);
2905 bnx2_release_nvram_lock(bp);
2910 } /* if (val & 0x40000000) */
2912 if (j == entry_count) {
2913 bp->flash_info = NULL;
2914 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
2918 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
2919 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
2921 bp->flash_size = val;
2923 bp->flash_size = bp->flash_info->total_size;
/* bnx2_nvram_read - read an arbitrary byte range from NVRAM into ret_buf.
 * The NVM hardware transfers aligned dwords, so this handles an unaligned
 * head (pre_len), a run of whole dwords, and an unaligned tail (extra)
 * separately, bracketing the burst with FIRST/LAST command flags.
 * Access is serialized via the NVRAM lock for the whole operation.
 * NOTE(review): excerpted listing — intermediate source lines are omitted.
 */
2929 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2933 u32 cmd_flags, offset32, len32, extra;
2938 /* Request access to the flash interface. */
2939 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2942 /* Enable access to flash interface */
2943 bnx2_enable_nvram_access(bp);
/* Bytes needed to reach the next 4-byte boundary. */
2956 pre_len = 4 - (offset & 3);
2958 if (pre_len >= len32) {
/* Entire request fits inside one dword. */
2960 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2961 BNX2_NVM_COMMAND_LAST;
2964 cmd_flags = BNX2_NVM_COMMAND_FIRST;
2967 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
/* Copy only the requested bytes out of the aligned dword. */
2972 memcpy(ret_buf, buf + (offset & 3), pre_len);
/* Tail handling: 'extra' is how many of the last dword's bytes are
 * beyond the caller's request; len32 is rounded up to a dword. */
2979 extra = 4 - (len32 & 3);
2980 len32 = (len32 + 4) & ~3;
2987 cmd_flags = BNX2_NVM_COMMAND_LAST;
2989 cmd_flags = BNX2_NVM_COMMAND_FIRST |
2990 BNX2_NVM_COMMAND_LAST;
2992 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
2994 memcpy(ret_buf, buf, 4 - extra);
2996 else if (len32 > 0) {
2999 /* Read the first word. */
3003 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3005 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3007 /* Advance to the next dword. */
/* Bulk loop: middle dwords carry no FIRST/LAST flags. */
3012 while (len32 > 4 && rc == 0) {
3013 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3015 /* Advance to the next dword. */
3024 cmd_flags = BNX2_NVM_COMMAND_LAST;
3025 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3027 memcpy(ret_buf, buf, 4 - extra);
3030 /* Disable access to flash interface */
3031 bnx2_disable_nvram_access(bp);
3033 bnx2_release_nvram_lock(bp);
/* bnx2_nvram_write - write an arbitrary byte range to NVRAM.
 * Strategy: widen the request to dword alignment (reading back the
 * partial head/tail dwords first), then process the range one flash
 * page at a time.  For non-buffered flash each page must be read into
 * a scratch buffer, erased, and rewritten in full; buffered flash can
 * be written directly.  The NVRAM lock is taken per page so other
 * agents are not starved for the whole transfer.
 * NOTE(review): excerpted listing — intermediate source lines (loop
 * closings, error paths, returns) are omitted.
 */
3039 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3042 u32 written, offset32, len32;
3043 u8 *buf, start[4], end[4], *flash_buffer = NULL;
3045 int align_start, align_end;
3050 align_start = align_end = 0;
/* Unaligned head: pull the existing dword so its leading bytes survive. */
3052 if ((align_start = (offset32 & 3))) {
3054 len32 += align_start;
3055 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
/* Unaligned tail: likewise preserve the trailing bytes of the last dword. */
3060 if ((len32 > 4) || !align_start) {
3061 align_end = 4 - (len32 & 3);
3063 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
/* Build one contiguous aligned image: [start pad][caller data][end pad]. */
3070 if (align_start || align_end) {
3071 buf = kmalloc(len32, GFP_KERNEL);
3075 memcpy(buf, start, 4);
3078 memcpy(buf + len32 - 4, end, 4);
3080 memcpy(buf + align_start, data_buf, buf_size);
/* Non-buffered flash needs a page-sized scratch buffer (264 bytes —
 * presumably the largest page_size in flash_table; TODO confirm). */
3083 if (bp->flash_info->buffered == 0) {
3084 flash_buffer = kmalloc(264, GFP_KERNEL);
3085 if (flash_buffer == NULL) {
3087 goto nvram_write_end;
/* Main per-page loop. */
3092 while ((written < len32) && (rc == 0)) {
3093 u32 page_start, page_end, data_start, data_end;
3094 u32 addr, cmd_flags;
3097 /* Find the page_start addr */
3098 page_start = offset32 + written;
3099 page_start -= (page_start % bp->flash_info->page_size);
3100 /* Find the page_end addr */
3101 page_end = page_start + bp->flash_info->page_size;
3102 /* Find the data_start addr */
3103 data_start = (written == 0) ? offset32 : page_start;
3104 /* Find the data_end addr */
3105 data_end = (page_end > offset32 + len32) ?
3106 (offset32 + len32) : page_end;
3108 /* Request access to the flash interface. */
3109 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3110 goto nvram_write_end;
3112 /* Enable access to flash interface */
3113 bnx2_enable_nvram_access(bp);
3115 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3116 if (bp->flash_info->buffered == 0) {
3119 /* Read the whole page into the buffer
3120 * (non-buffer flash only) */
3121 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3122 if (j == (bp->flash_info->page_size - 4)) {
3123 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3125 rc = bnx2_nvram_read_dword(bp,
3131 goto nvram_write_end;
3137 /* Enable writes to flash interface (unlock write-protect) */
3138 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3139 goto nvram_write_end;
3141 /* Erase the page */
3142 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3143 goto nvram_write_end;
3145 /* Re-enable the write again for the actual write */
3146 bnx2_enable_nvram_write(bp);
3148 /* Loop to write back the buffer data from page_start to
3151 if (bp->flash_info->buffered == 0) {
3152 for (addr = page_start; addr < data_start;
3153 addr += 4, i += 4) {
3155 rc = bnx2_nvram_write_dword(bp, addr,
3156 &flash_buffer[i], cmd_flags);
3159 goto nvram_write_end;
3165 /* Loop to write the new data from data_start to data_end */
3166 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
/* LAST flag on the page's final dword, or — for buffered flash, where
 * no tail restore follows — on the final data dword. */
3167 if ((addr == page_end - 4) ||
3168 ((bp->flash_info->buffered) &&
3169 (addr == data_end - 4))) {
3171 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3173 rc = bnx2_nvram_write_dword(bp, addr, buf,
3177 goto nvram_write_end;
3183 /* Loop to write back the buffer data from data_end
3185 if (bp->flash_info->buffered == 0) {
3186 for (addr = data_end; addr < page_end;
3187 addr += 4, i += 4) {
3189 if (addr == page_end-4) {
3190 cmd_flags = BNX2_NVM_COMMAND_LAST;
3192 rc = bnx2_nvram_write_dword(bp, addr,
3193 &flash_buffer[i], cmd_flags);
3196 goto nvram_write_end;
3202 /* Disable writes to flash interface (lock write-protect) */
3203 bnx2_disable_nvram_write(bp);
3205 /* Disable access to flash interface */
3206 bnx2_disable_nvram_access(bp);
3207 bnx2_release_nvram_lock(bp);
3209 /* Increment written */
3210 written += data_end - data_start;
/* Cleanup: free scratch buffers allocated above. */
3214 if (bp->flash_info->buffered == 0)
3215 kfree(flash_buffer);
3217 if (align_start || align_end)
/* bnx2_reset_chip - perform a coordinated soft reset of the controller.
 * Quiesces DMA, handshakes with the bootcode firmware (WAIT0/WAIT1),
 * issues the core reset (register path differs on 5709), polls for
 * completion, verifies endian configuration, and applies 5706 A0
 * errata workarounds.  Returns 0 on success (return paths omitted
 * from this excerpt).
 * NOTE(review): excerpted listing — intermediate source lines are omitted.
 */
3223 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3228 /* Wait for the current PCI transaction to complete before
3229 * issuing a reset. */
3230 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3231 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3232 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3233 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3234 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3235 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3238 /* Wait for the firmware to tell us it is ok to issue a reset. */
3239 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3241 /* Deposit a driver reset signature so the firmware knows that
3242 * this is a soft reset. */
3243 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3244 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3246 /* Do a dummy read to force the chip to complete all current transaction
3247 * before we issue a reset. */
3248 val = REG_RD(bp, BNX2_MISC_ID);
/* 5709 resets through MISC_COMMAND; older chips via PCICFG_MISC_CONFIG. */
3250 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3251 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3252 REG_RD(bp, BNX2_MISC_COMMAND);
3255 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3256 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3258 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3261 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3262 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3263 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3266 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
/* 5706 A0/A1 errata: sleep ~20ms before polling for reset completion. */
3268 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3269 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3270 current->state = TASK_UNINTERRUPTIBLE;
3271 schedule_timeout(HZ / 50);
3274 /* Reset takes approximate 30 usec */
3275 for (i = 0; i < 10; i++) {
3276 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3277 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3278 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
/* Still busy after the poll loop: reset failed. */
3283 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3284 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3285 printk(KERN_ERR PFX "Chip reset did not complete\n");
3290 /* Make sure byte swapping is properly configured. */
3291 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3292 if (val != 0x01020304) {
3293 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3297 /* Wait for the firmware to finish its initialization. */
3298 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
/* 5706 A0 errata workarounds. */
3302 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3303 /* Adjust the voltage regular to two steps lower. The default
3304 * of this register is 0x0000000e. */
3305 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3307 /* Remove bad rbuf memory from the free pool. */
3308 rc = bnx2_alloc_bad_rbuf(bp);
/* bnx2_init_chip - bring the controller from post-reset state to
 * operational: DMA/byte-swap config, internal CPUs and contexts, MAC
 * address and MTU, host-coalescing (status/stats block DMA addresses
 * and tick thresholds), attention bits, and the RX filter.  Finishes
 * with the WAIT2 firmware handshake and enables all blocks.
 * NOTE(review): excerpted listing — intermediate source lines are omitted.
 */
3315 bnx2_init_chip(struct bnx2 *bp)
3320 /* Make sure the interrupt is not active. */
3321 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3323 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3324 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3326 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3328 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3329 DMA_READ_CHANS << 12 |
3330 DMA_WRITE_CHANS << 16;
3332 val |= (0x2 << 20) | (1 << 11);
3334 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
/* Ping-pong DMA workaround: 5706 (non-A0) on conventional PCI only. */
3337 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3338 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3339 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3341 REG_WR(bp, BNX2_DMA_CONFIG, val);
3343 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3344 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3345 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3346 REG_WR(bp, BNX2_TDMA_CONFIG, val);
/* On PCI-X, clear the Enable-Relaxed-Ordering bit in the PCI-X command
 * register. */
3349 if (bp->flags & PCIX_FLAG) {
3352 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3354 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3355 val16 & ~PCI_X_CMD_ERO);
3358 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3359 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3360 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3361 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3363 /* Initialize context mapping and zero out the quick contexts. The
3364 * context block must have already been enabled. */
3365 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3366 bnx2_init_5709_context(bp);
3368 bnx2_init_context(bp);
3370 if ((rc = bnx2_init_cpus(bp)) != 0)
3373 bnx2_init_nvram(bp);
3375 bnx2_set_mac_addr(bp);
3377 val = REG_RD(bp, BNX2_MQ_CONFIG);
3378 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3379 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3380 REG_WR(bp, BNX2_MQ_CONFIG, val);
3382 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3383 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3384 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3386 val = (BCM_PAGE_BITS - 8) << 24;
3387 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3389 /* Configure page size. */
3390 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3391 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3392 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3393 REG_WR(bp, BNX2_TBDR_CONFIG, val);
/* Backoff seed derived from the MAC address (line for mac_addr[3] is
 * omitted from this excerpt). */
3395 val = bp->mac_addr[0] +
3396 (bp->mac_addr[1] << 8) +
3397 (bp->mac_addr[2] << 16) +
3399 (bp->mac_addr[4] << 8) +
3400 (bp->mac_addr[5] << 16);
3401 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3403 /* Program the MTU. Also include 4 bytes for CRC32. */
3404 val = bp->dev->mtu + ETH_HLEN + 4;
3405 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3406 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3407 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3409 bp->last_status_idx = 0;
3410 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3412 /* Set up how to generate a link change interrupt. */
3413 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
/* Program the DMA addresses of the status and statistics blocks. */
3415 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3416 (u64) bp->status_blk_mapping & 0xffffffff);
3417 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3419 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3420 (u64) bp->stats_blk_mapping & 0xffffffff);
3421 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3422 (u64) bp->stats_blk_mapping >> 32);
/* Interrupt coalescing thresholds: high 16 bits = during-interrupt
 * value, low 16 bits = normal value. */
3424 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3425 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3427 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3428 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3430 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3431 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3433 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3435 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3437 REG_WR(bp, BNX2_HC_COM_TICKS,
3438 (bp->com_ticks_int << 16) | bp->com_ticks);
3440 REG_WR(bp, BNX2_HC_CMD_TICKS,
3441 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3443 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3444 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
/* 5706 A1 errata: timer modes unavailable; stats collection only. */
3446 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3447 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3449 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3450 BNX2_HC_CONFIG_TX_TMR_MODE |
3451 BNX2_HC_CONFIG_COLLECT_STATS);
3454 /* Clear internal stats counters. */
3455 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3457 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3459 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3460 BNX2_PORT_FEATURE_ASF_ENABLED)
3461 bp->flags |= ASF_ENABLE_FLAG;
3463 /* Initialize the receive filter. */
3464 bnx2_set_rx_mode(bp->dev);
3466 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
/* Enable (almost) all blocks; read back to flush the posted write. */
3469 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3470 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3474 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
/* bnx2_init_tx_context - program the L2 TX context for the given CID:
 * context type, command type, and the 64-bit TX BD chain base address.
 * The 5709 (Xinan) uses a different set of context offsets than the
 * 5706/5708, selected up front.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3480 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3482 u32 val, offset0, offset1, offset2, offset3;
3484 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3485 offset0 = BNX2_L2CTX_TYPE_XI;
3486 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3487 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3488 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3490 offset0 = BNX2_L2CTX_TYPE;
3491 offset1 = BNX2_L2CTX_CMD_TYPE;
3492 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3493 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3495 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3496 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3498 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3499 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
/* TX BD chain base address, split into high and low dwords. */
3501 val = (u64) bp->tx_desc_mapping >> 32;
3502 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3504 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3505 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
/* bnx2_init_tx_ring - reset TX ring software state and chain the ring:
 * the last BD points back at the ring base, the producer indices are
 * zeroed, the mailbox doorbell addresses are cached, and the hardware
 * TX context is programmed via bnx2_init_tx_context().
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3509 bnx2_init_tx_ring(struct bnx2 *bp)
/* Wake the queue when at least half the ring is free. */
3514 bp->tx_wake_thresh = bp->tx_ring_size / 2;
/* Last descriptor is the chain link back to the ring's base address. */
3516 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3518 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3519 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3524 bp->tx_prod_bseq = 0;
3527 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3528 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3530 bnx2_init_tx_context(bp, cid);
/* bnx2_init_rx_ring - initialize the RX BD rings: size the receive
 * buffers from the MTU, chain the ring pages together, program the RX
 * context with the first page's DMA address, pre-fill the ring with
 * skbs, and publish the initial producer index/bseq to the hardware
 * mailboxes.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3534 bnx2_init_rx_ring(struct bnx2 *bp)
3538 u16 prod, ring_prod;
3541 /* 8 for CRC and VLAN */
3542 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3544 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3546 ring_prod = prod = bp->rx_prod = 0;
3549 bp->rx_prod_bseq = 0;
/* Fill every page of BDs; the last BD of each page chains to the next
 * page (wrapping to page 0 on the final page — chain lines partly
 * omitted from this excerpt). */
3551 for (i = 0; i < bp->rx_max_ring; i++) {
3554 rxbd = &bp->rx_desc_ring[i][0];
3555 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3556 rxbd->rx_bd_len = bp->rx_buf_use_size;
3557 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3559 if (i == (bp->rx_max_ring - 1))
3563 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3564 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
/* Program the RX context: type and BD chain head address. */
3568 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3569 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3571 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3573 val = (u64) bp->rx_desc_mapping[0] >> 32;
3574 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3576 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3577 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
/* Pre-allocate receive skbs; stop quietly if allocation fails. */
3579 for (i = 0; i < bp->rx_ring_size; i++) {
3580 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3583 prod = NEXT_RX_BD(prod);
3584 ring_prod = RX_RING_IDX(prod);
3588 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3590 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
/* bnx2_set_rx_ring_size - record the requested RX ring size and compute
 * how many BD ring pages are needed, rounded up to a power of two, along
 * with the resulting maximum ring index.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3594 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3598 bp->rx_ring_size = size;
/* Count how many full BD pages the requested size spans. */
3600 while (size > MAX_RX_DESC_CNT) {
3601 size -= MAX_RX_DESC_CNT;
3604 /* round to next power of 2 */
3606 while ((max & num_rings) == 0)
3609 if (num_rings != max)
3612 bp->rx_max_ring = max;
3613 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
/* bnx2_free_tx_skbs - unmap and free every skb still held on the TX
 * ring.  A multi-fragment skb occupies the head slot plus one slot per
 * page fragment, so the inner loop unmaps the fragment slots too.
 * Safe to call when the ring was never allocated.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3617 bnx2_free_tx_skbs(struct bnx2 *bp)
3621 if (bp->tx_buf_ring == NULL)
3624 for (i = 0; i < TX_DESC_CNT; ) {
3625 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3626 struct sk_buff *skb = tx_buf->skb;
/* Unmap the linear (head) portion of the skb. */
3634 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3635 skb_headlen(skb), PCI_DMA_TODEVICE);
/* Then each page fragment, which occupies the following ring slots. */
3639 last = skb_shinfo(skb)->nr_frags;
3640 for (j = 0; j < last; j++) {
3641 tx_buf = &bp->tx_buf_ring[i + j + 1];
3642 pci_unmap_page(bp->pdev,
3643 pci_unmap_addr(tx_buf, mapping),
3644 skb_shinfo(skb)->frags[j].size,
/* bnx2_free_rx_skbs - unmap and free every skb still posted on the RX
 * ring.  Safe to call when the ring was never allocated.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3654 bnx2_free_rx_skbs(struct bnx2 *bp)
3658 if (bp->rx_buf_ring == NULL)
3661 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3662 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3663 struct sk_buff *skb = rx_buf->skb;
3668 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3669 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
/* bnx2_free_skbs - release all skbs on both the TX and RX rings. */
3678 bnx2_free_skbs(struct bnx2 *bp)
3680 bnx2_free_tx_skbs(bp);
3681 bnx2_free_rx_skbs(bp);
/* bnx2_reset_nic - full NIC reinitialization: chip reset, chip init,
 * then fresh TX and RX rings.  Propagates the first failure.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3685 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3689 rc = bnx2_reset_chip(bp, reset_code);
3694 if ((rc = bnx2_init_chip(bp)) != 0)
3697 bnx2_init_tx_ring(bp);
3698 bnx2_init_rx_ring(bp);
/* bnx2_init_nic - reset the NIC and (under phy_lock) bring up the link.
 * The PHY work between lock/unlock is omitted from this excerpt.
 */
3703 bnx2_init_nic(struct bnx2 *bp)
3707 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3710 spin_lock_bh(&bp->phy_lock);
3712 spin_unlock_bh(&bp->phy_lock);
/* bnx2_test_registers - ethtool self-test helper: walk a table of
 * { offset, flags, rw_mask, ro_mask } entries and verify that the
 * read/write bits of each register can be cleared and set, while the
 * read-only bits are unaffected.  Each register's original value is
 * saved and restored.  The table is terminated by offset 0xffff.
 * NOTE(review): excerpted listing — failure-path lines are omitted.
 */
3718 bnx2_test_registers(struct bnx2 *bp)
3722 static const struct {
3728 { 0x006c, 0, 0x00000000, 0x0000003f },
3729 { 0x0090, 0, 0xffffffff, 0x00000000 },
3730 { 0x0094, 0, 0x00000000, 0x00000000 },
3732 { 0x0404, 0, 0x00003f00, 0x00000000 },
3733 { 0x0418, 0, 0x00000000, 0xffffffff },
3734 { 0x041c, 0, 0x00000000, 0xffffffff },
3735 { 0x0420, 0, 0x00000000, 0x80ffffff },
3736 { 0x0424, 0, 0x00000000, 0x00000000 },
3737 { 0x0428, 0, 0x00000000, 0x00000001 },
3738 { 0x0450, 0, 0x00000000, 0x0000ffff },
3739 { 0x0454, 0, 0x00000000, 0xffffffff },
3740 { 0x0458, 0, 0x00000000, 0xffffffff },
3742 { 0x0808, 0, 0x00000000, 0xffffffff },
3743 { 0x0854, 0, 0x00000000, 0xffffffff },
3744 { 0x0868, 0, 0x00000000, 0x77777777 },
3745 { 0x086c, 0, 0x00000000, 0x77777777 },
3746 { 0x0870, 0, 0x00000000, 0x77777777 },
3747 { 0x0874, 0, 0x00000000, 0x77777777 },
3749 { 0x0c00, 0, 0x00000000, 0x00000001 },
3750 { 0x0c04, 0, 0x00000000, 0x03ff0001 },
3751 { 0x0c08, 0, 0x0f0ff073, 0x00000000 },
3753 { 0x1000, 0, 0x00000000, 0x00000001 },
3754 { 0x1004, 0, 0x00000000, 0x000f0001 },
3756 { 0x1408, 0, 0x01c00800, 0x00000000 },
3757 { 0x149c, 0, 0x8000ffff, 0x00000000 },
3758 { 0x14a8, 0, 0x00000000, 0x000001ff },
3759 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
3760 { 0x14b0, 0, 0x00000002, 0x00000001 },
3761 { 0x14b8, 0, 0x00000000, 0x00000000 },
3762 { 0x14c0, 0, 0x00000000, 0x00000009 },
3763 { 0x14c4, 0, 0x00003fff, 0x00000000 },
3764 { 0x14cc, 0, 0x00000000, 0x00000001 },
3765 { 0x14d0, 0, 0xffffffff, 0x00000000 },
3767 { 0x1800, 0, 0x00000000, 0x00000001 },
3768 { 0x1804, 0, 0x00000000, 0x00000003 },
3770 { 0x2800, 0, 0x00000000, 0x00000001 },
3771 { 0x2804, 0, 0x00000000, 0x00003f01 },
3772 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
3773 { 0x2810, 0, 0xffff0000, 0x00000000 },
3774 { 0x2814, 0, 0xffff0000, 0x00000000 },
3775 { 0x2818, 0, 0xffff0000, 0x00000000 },
3776 { 0x281c, 0, 0xffff0000, 0x00000000 },
3777 { 0x2834, 0, 0xffffffff, 0x00000000 },
3778 { 0x2840, 0, 0x00000000, 0xffffffff },
3779 { 0x2844, 0, 0x00000000, 0xffffffff },
3780 { 0x2848, 0, 0xffffffff, 0x00000000 },
3781 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
3783 { 0x2c00, 0, 0x00000000, 0x00000011 },
3784 { 0x2c04, 0, 0x00000000, 0x00030007 },
3786 { 0x3c00, 0, 0x00000000, 0x00000001 },
3787 { 0x3c04, 0, 0x00000000, 0x00070000 },
3788 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
3789 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
3790 { 0x3c10, 0, 0xffffffff, 0x00000000 },
3791 { 0x3c14, 0, 0x00000000, 0xffffffff },
3792 { 0x3c18, 0, 0x00000000, 0xffffffff },
3793 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
3794 { 0x3c20, 0, 0xffffff00, 0x00000000 },
3796 { 0x5004, 0, 0x00000000, 0x0000007f },
3797 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
3798 { 0x500c, 0, 0xf800f800, 0x07ff07ff },
3800 { 0x5c00, 0, 0x00000000, 0x00000001 },
3801 { 0x5c04, 0, 0x00000000, 0x0003000f },
3802 { 0x5c08, 0, 0x00000003, 0x00000000 },
3803 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
3804 { 0x5c10, 0, 0x00000000, 0xffffffff },
3805 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
3806 { 0x5c84, 0, 0x00000000, 0x0000f333 },
3807 { 0x5c88, 0, 0x00000000, 0x00077373 },
3808 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
3810 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
3811 { 0x680c, 0, 0xffffffff, 0x00000000 },
3812 { 0x6810, 0, 0xffffffff, 0x00000000 },
3813 { 0x6814, 0, 0xffffffff, 0x00000000 },
3814 { 0x6818, 0, 0xffffffff, 0x00000000 },
3815 { 0x681c, 0, 0xffffffff, 0x00000000 },
3816 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
3817 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
3818 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
3819 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
3820 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
3821 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
3822 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
3823 { 0x683c, 0, 0x0000ffff, 0x00000000 },
3824 { 0x6840, 0, 0x00000ff0, 0x00000000 },
3825 { 0x6844, 0, 0x00ffff00, 0x00000000 },
3826 { 0x684c, 0, 0xffffffff, 0x00000000 },
3827 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
3828 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
3829 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
3830 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
3831 { 0x6908, 0, 0x00000000, 0x0001ff0f },
3832 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
/* Sentinel: terminates the walk below. */
3834 { 0xffff, 0, 0x00000000, 0x00000000 },
3838 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
3839 u32 offset, rw_mask, ro_mask, save_val, val;
3841 offset = (u32) reg_tbl[i].offset;
3842 rw_mask = reg_tbl[i].rw_mask;
3843 ro_mask = reg_tbl[i].ro_mask;
3845 save_val = readl(bp->regview + offset);
/* Phase 1: write all zeros — R/W bits must read back 0, R/O bits
 * must keep their saved value. */
3847 writel(0, bp->regview + offset);
3849 val = readl(bp->regview + offset);
3850 if ((val & rw_mask) != 0) {
3854 if ((val & ro_mask) != (save_val & ro_mask)) {
/* Phase 2: write all ones — R/W bits must read back 1, R/O bits
 * must still keep their saved value. */
3858 writel(0xffffffff, bp->regview + offset);
3860 val = readl(bp->regview + offset);
3861 if ((val & rw_mask) != rw_mask) {
3865 if ((val & ro_mask) != (save_val & ro_mask)) {
3869 writel(save_val, bp->regview + offset);
3873 writel(save_val, bp->regview + offset);
/* bnx2_do_mem_test - pattern-test a region of on-chip memory via the
 * indirect register window: write each test pattern to every dword in
 * [start, start+size) and verify the read-back matches.
 * NOTE(review): excerpted listing — failure/return lines are omitted.
 */
3881 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3883 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
3884 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3887 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3890 for (offset = 0; offset < size; offset += 4) {
3892 REG_WR_IND(bp, start + offset, test_pattern[i]);
3894 if (REG_RD_IND(bp, start + offset) !=
/* bnx2_test_memory - run bnx2_do_mem_test() over each internal memory
 * region in mem_tbl[] (offset/length pairs), stopping at the first
 * failure.  The table terminator (0xffffffff) is in omitted lines.
 */
3904 bnx2_test_memory(struct bnx2 *bp)
3908 static const struct {
3912 { 0x60000, 0x4000 },
3913 { 0xa0000, 0x3000 },
3914 { 0xe0000, 0x4000 },
3915 { 0x120000, 0x4000 },
3916 { 0x1a0000, 0x4000 },
3917 { 0x160000, 0x4000 },
3921 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3922 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3923 mem_tbl[i].len)) != 0) {
3931 #define BNX2_MAC_LOOPBACK 0
3932 #define BNX2_PHY_LOOPBACK 1
/* bnx2_run_loopback - self-test: configure MAC or PHY loopback, build a
 * single test frame (dst = own MAC, incrementing byte payload), transmit
 * it directly on the TX ring, then verify exactly one frame arrives on
 * the RX ring with no l2_fhdr errors, the expected length, and an intact
 * payload.  Returns via the loopback_test_done label (omitted lines).
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
3935 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
3937 unsigned int pkt_size, num_pkts, i;
3938 struct sk_buff *skb, *rx_skb;
3939 unsigned char *packet;
3940 u16 rx_start_idx, rx_idx;
3943 struct sw_bd *rx_buf;
3944 struct l2_fhdr *rx_hdr;
3947 if (loopback_mode == BNX2_MAC_LOOPBACK) {
3948 bp->loopback = MAC_LOOPBACK;
3949 bnx2_set_mac_loopback(bp);
3951 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
3952 bp->loopback = PHY_LOOPBACK;
3953 bnx2_set_phy_loopback(bp);
3959 skb = netdev_alloc_skb(bp->dev, pkt_size);
/* Frame layout: 6-byte dst = own MAC, 8 zero bytes, then bytes 14..n-1
 * carry (i & 0xff) so payload corruption is detectable on receive. */
3962 packet = skb_put(skb, pkt_size);
3963 memcpy(packet, bp->mac_addr, 6);
3964 memset(packet + 6, 0x0, 8);
3965 for (i = 14; i < pkt_size; i++)
3966 packet[i] = (unsigned char) (i & 0xff);
3968 map = pci_map_single(bp->pdev, skb->data, pkt_size,
/* Force a coalesce-now so the status block reflects current indices. */
3971 REG_WR(bp, BNX2_HC_COMMAND,
3972 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3974 REG_RD(bp, BNX2_HC_COMMAND);
3977 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
/* Hand-build one TX BD and ring the doorbell registers directly. */
3981 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
3983 txbd->tx_bd_haddr_hi = (u64) map >> 32;
3984 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
3985 txbd->tx_bd_mss_nbytes = pkt_size;
3986 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
3989 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
3990 bp->tx_prod_bseq += pkt_size;
3992 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
3993 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
3997 REG_WR(bp, BNX2_HC_COMMAND,
3998 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4000 REG_RD(bp, BNX2_HC_COMMAND);
4004 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
/* Verify the frame was consumed by TX ... */
4007 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4008 goto loopback_test_done;
/* ... and that exactly num_pkts frames arrived on RX. */
4011 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4012 if (rx_idx != rx_start_idx + num_pkts) {
4013 goto loopback_test_done;
4016 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4017 rx_skb = rx_buf->skb;
4019 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4020 skb_reserve(rx_skb, bp->rx_offset);
4022 pci_dma_sync_single_for_cpu(bp->pdev,
4023 pci_unmap_addr(rx_buf, mapping),
4024 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4026 if (rx_hdr->l2_fhdr_status &
4027 (L2_FHDR_ERRORS_BAD_CRC |
4028 L2_FHDR_ERRORS_PHY_DECODE |
4029 L2_FHDR_ERRORS_ALIGNMENT |
4030 L2_FHDR_ERRORS_TOO_SHORT |
4031 L2_FHDR_ERRORS_GIANT_FRAME)) {
4033 goto loopback_test_done;
/* Length check: received length includes the 4-byte CRC. */
4036 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4037 goto loopback_test_done;
/* Payload check against the pattern written at transmit time. */
4040 for (i = 14; i < pkt_size; i++) {
4041 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4042 goto loopback_test_done;
4053 #define BNX2_MAC_LOOPBACK_FAILED 1
4054 #define BNX2_PHY_LOOPBACK_FAILED 2
4055 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4056 BNX2_PHY_LOOPBACK_FAILED)
/* bnx2_test_loopback - run both MAC and PHY loopback self-tests after a
 * fresh NIC reset; returns a bitmask of BNX2_*_LOOPBACK_FAILED flags
 * (all failed if the interface is not running).
 * NOTE(review): excerpted listing — PHY-init lines under phy_lock are omitted.
 */
4059 bnx2_test_loopback(struct bnx2 *bp)
4063 if (!netif_running(bp->dev))
4064 return BNX2_LOOPBACK_FAILED;
4066 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4067 spin_lock_bh(&bp->phy_lock);
4069 spin_unlock_bh(&bp->phy_lock);
4070 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4071 rc |= BNX2_MAC_LOOPBACK_FAILED;
4072 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4073 rc |= BNX2_PHY_LOOPBACK_FAILED;
4077 #define NVRAM_SIZE 0x200
4078 #define CRC32_RESIDUAL 0xdebb20e3
/* bnx2_test_nvram - self-test: verify the NVRAM magic value (0x669955aa
 * at offset 0) and the CRC32 of two 0x100-byte regions starting at
 * offset 0x100.  A CRC computed over data that includes its stored
 * checksum yields the constant residual CRC32_RESIDUAL when intact.
 */
4081 bnx2_test_nvram(struct bnx2 *bp)
4083 u32 buf[NVRAM_SIZE / 4];
4084 u8 *data = (u8 *) buf;
4088 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4089 goto test_nvram_done;
4091 magic = be32_to_cpu(buf[0]);
4092 if (magic != 0x669955aa) {
4094 goto test_nvram_done;
4097 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4098 goto test_nvram_done;
/* First 0x100-byte block (manufacturing info). */
4100 csum = ether_crc_le(0x100, data);
4101 if (csum != CRC32_RESIDUAL) {
4103 goto test_nvram_done;
/* Second 0x100-byte block (feature configuration). */
4106 csum = ether_crc_le(0x100, data + 0x100);
4107 if (csum != CRC32_RESIDUAL) {
/* bnx2_test_link - self-test: report link state from MII BMSR.  BMSR is
 * read twice because the link-status bit is latched-low; the second read
 * reflects the current state.
 */
4116 bnx2_test_link(struct bnx2 *bp)
4120 spin_lock_bh(&bp->phy_lock);
4121 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4122 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4123 spin_unlock_bh(&bp->phy_lock);
4125 if (bmsr & BMSR_LSTATUS) {
/* bnx2_test_intr - verify interrupt delivery (used to validate MSI):
 * snapshot the status index, force a coalesce-now (which raises an
 * interrupt), and poll up to ~100ms for the index to advance.
 * NOTE(review): excerpted listing — return statements are omitted.
 */
4132 bnx2_test_intr(struct bnx2 *bp)
4137 if (!netif_running(bp->dev))
4140 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4142 /* This register is not touched during run-time. */
4143 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4144 REG_RD(bp, BNX2_HC_COMMAND);
4146 for (i = 0; i < 10; i++) {
4147 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4153 msleep_interruptible(10);
/* bnx2_5706_serdes_timer - periodic SerDes link maintenance for the 5706.
 * Implements parallel detection: if autoneg has not produced a link and
 * the PHY reports signal-detect without a CONFIG exchange (partner is a
 * forced-speed device), force 1000/full; once in parallel-detect mode,
 * re-enable autoneg when the condition clears.  Runs with phy_lock held.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
4162 bnx2_5706_serdes_timer(struct bnx2 *bp)
4164 spin_lock(&bp->phy_lock);
4165 if (bp->serdes_an_pending)
4166 bp->serdes_an_pending--;
4167 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4170 bp->current_interval = bp->timer_interval;
4172 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4174 if (bmcr & BMCR_ANENABLE) {
/* Shadow-register reads: 0x1c/0x17 select, 0x15 returns the value.
 * Register 0x15 is read twice — presumably to get a stable/latched
 * value; TODO confirm against the PHY datasheet. */
4177 bnx2_write_phy(bp, 0x1c, 0x7c00);
4178 bnx2_read_phy(bp, 0x1c, &phy1);
4180 bnx2_write_phy(bp, 0x17, 0x0f01);
4181 bnx2_read_phy(bp, 0x15, &phy2);
4182 bnx2_write_phy(bp, 0x17, 0x0f01);
4183 bnx2_read_phy(bp, 0x15, &phy2);
4185 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4186 !(phy2 & 0x20)) { /* no CONFIG */
/* Partner isn't autonegotiating: force 1000 Mb/s full duplex. */
4188 bmcr &= ~BMCR_ANENABLE;
4189 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4190 bnx2_write_phy(bp, MII_BMCR, bmcr);
4191 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
/* Link is up in parallel-detect mode: watch for the partner starting
 * to autoneg again, then hand control back to autonegotiation. */
4195 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4196 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4199 bnx2_write_phy(bp, 0x17, 0x0f01);
4200 bnx2_read_phy(bp, 0x15, &phy2);
4204 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4205 bmcr |= BMCR_ANENABLE;
4206 bnx2_write_phy(bp, MII_BMCR, bmcr);
4208 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4211 bp->current_interval = bp->timer_interval;
4213 spin_unlock(&bp->phy_lock);
/* bnx2_5708_serdes_timer - periodic SerDes maintenance for the 5708
 * (only when the PHY is 2.5G-capable).  While the link is down with
 * autoneg requested, alternate between forcing 2.5G full duplex and
 * re-enabling autoneg, so either kind of link partner can be matched.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
4217 bnx2_5708_serdes_timer(struct bnx2 *bp)
4219 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4220 bp->serdes_an_pending = 0;
4224 spin_lock(&bp->phy_lock);
4225 if (bp->serdes_an_pending)
4226 bp->serdes_an_pending--;
4227 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4230 bnx2_read_phy(bp, MII_BMCR, &bmcr);
/* Toggle: autoneg was on and failed -> try forced 2.5G; else go back
 * to autoneg and give it two timer ticks (serdes_an_pending = 2). */
4232 if (bmcr & BMCR_ANENABLE) {
4233 bmcr &= ~BMCR_ANENABLE;
4234 bmcr |= BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500;
4235 bnx2_write_phy(bp, MII_BMCR, bmcr);
4236 bp->current_interval = SERDES_FORCED_TIMEOUT;
4238 bmcr &= ~(BMCR_FULLDPLX | BCM5708S_BMCR_FORCE_2500);
4239 bmcr |= BMCR_ANENABLE;
4240 bnx2_write_phy(bp, MII_BMCR, bmcr);
4241 bp->serdes_an_pending = 2;
4242 bp->current_interval = bp->timer_interval;
4246 bp->current_interval = bp->timer_interval;
4248 spin_unlock(&bp->phy_lock);
/* bnx2_timer - periodic driver timer: sends the firmware keep-alive
 * pulse, mirrors the firmware RX-drop counter into the stats block,
 * runs the chip-specific SerDes state machine, and re-arms itself.
 * Skipped (but still re-armed) while interrupts are disabled via
 * intr_sem.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
4252 bnx2_timer(unsigned long data)
4254 struct bnx2 *bp = (struct bnx2 *) data;
4257 if (!netif_running(bp->dev))
4260 if (atomic_read(&bp->intr_sem) != 0)
4261 goto bnx2_restart_timer;
/* Keep-alive heartbeat: firmware watches this sequence number. */
4263 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4264 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4266 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4268 if (bp->phy_flags & PHY_SERDES_FLAG) {
4269 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4270 bnx2_5706_serdes_timer(bp);
4271 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
4272 bnx2_5708_serdes_timer(bp);
4276 mod_timer(&bp->timer, jiffies + bp->current_interval);
/* Called with rtnl_lock */
/* bnx2_open - net_device open handler: power up, allocate rings, pick
 * MSI or shared INTx (MSI avoided on 5706 A0/A1), initialize the NIC,
 * start the timer, and verify MSI actually delivers an interrupt —
 * falling back to INTx (with a full re-init) if it does not.
 * NOTE(review): excerpted listing — several error/cleanup lines are omitted.
 */
4281 bnx2_open(struct net_device *dev)
4283 struct bnx2 *bp = netdev_priv(dev);
4286 bnx2_set_power_state(bp, PCI_D0);
4287 bnx2_disable_int(bp);
4289 rc = bnx2_alloc_mem(bp);
/* MSI is not used on 5706 A0/A1 (hardware errata — TODO confirm
 * against the original driver changelog). */
4293 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4294 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4297 if (pci_enable_msi(bp->pdev) == 0) {
4298 bp->flags |= USING_MSI_FLAG;
4299 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4303 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4304 IRQF_SHARED, dev->name, dev);
4308 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4316 rc = bnx2_init_nic(bp);
/* init failed: undo the IRQ/MSI setup done above. */
4319 free_irq(bp->pdev->irq, dev);
4320 if (bp->flags & USING_MSI_FLAG) {
4321 pci_disable_msi(bp->pdev);
4322 bp->flags &= ~USING_MSI_FLAG;
4329 mod_timer(&bp->timer, jiffies + bp->current_interval);
4331 atomic_set(&bp->intr_sem, 0);
4333 bnx2_enable_int(bp);
4335 if (bp->flags & USING_MSI_FLAG) {
4336 /* Test MSI to make sure it is working
4337 * If MSI test fails, go back to INTx mode
4339 if (bnx2_test_intr(bp) != 0) {
4340 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4341 " using MSI, switching to INTx mode. Please"
4342 " report this failure to the PCI maintainer"
4343 " and include system chipset information.\n",
/* Tear down MSI, re-init the NIC, and retry with shared INTx. */
4346 bnx2_disable_int(bp);
4347 free_irq(bp->pdev->irq, dev);
4348 pci_disable_msi(bp->pdev);
4349 bp->flags &= ~USING_MSI_FLAG;
4351 rc = bnx2_init_nic(bp);
4354 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4355 IRQF_SHARED, dev->name, dev);
4360 del_timer_sync(&bp->timer);
4363 bnx2_enable_int(bp);
4366 if (bp->flags & USING_MSI_FLAG) {
4367 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4370 netif_start_queue(dev);
/* bnx2_reset_task - deferred-work handler (scheduled from tx_timeout):
 * stops the netif, resets/re-inits the NIC (omitted lines), then
 * restarts.  in_reset_task guards against concurrent teardown paths.
 * intr_sem is left at 1 so the next interrupt path re-enables cleanly.
 * NOTE(review): excerpted listing — some source lines are omitted.
 */
4376 bnx2_reset_task(void *data)
4378 struct bnx2 *bp = data;
4380 if (!netif_running(bp->dev))
4383 bp->in_reset_task = 1;
4384 bnx2_netif_stop(bp);
4388 atomic_set(&bp->intr_sem, 1);
4389 bnx2_netif_start(bp);
4390 bp->in_reset_task = 0;
/* bnx2_tx_timeout - netdev watchdog callback: defer the heavy reset to
 * process context via the reset_task workqueue item. */
4394 bnx2_tx_timeout(struct net_device *dev)
4396 struct bnx2 *bp = netdev_priv(dev);
4398 /* This allows the netif to be shutdown gracefully before resetting */
4399 schedule_work(&bp->reset_task);
/* VLAN group registration: quiesce the device, record the group
 * (bp->vlgrp assignment lost in extraction -- TODO confirm), reprogram
 * the RX mode, and restart. */
4403 /* Called with rtnl_lock */
4405 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4407 struct bnx2 *bp = netdev_priv(dev);
4409 bnx2_netif_stop(bp);
4412 bnx2_set_rx_mode(dev);
4414 bnx2_netif_start(bp);
/* Remove one VLAN id: clear the vlan_devices slot under a stopped netif
 * and reprogram the RX mode.  The vlgrp NULL-check before the deref is
 * presumably on a dropped line -- verify against full source. */
4417 /* Called with rtnl_lock */
4419 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4421 struct bnx2 *bp = netdev_priv(dev);
4423 bnx2_netif_stop(bp);
4426 bp->vlgrp->vlan_devices[vid] = NULL;
4427 bnx2_set_rx_mode(dev);
4429 bnx2_netif_start(bp);
/* Hard-start transmit.  Flow: check ring space; build vlan_tag_flags from
 * checksum offload, VLAN tag and TSO (LSO) state; DMA-map the head and
 * each page fragment into consecutive tx BDs; kick the producer index and
 * byte-sequence doorbells; stop the queue when the ring is nearly full.
 * Runs under netif_tx_lock (see original comment).  Several lines (local
 * declarations, some closing braces) were dropped by the extraction. */
4433 /* Called with netif_tx_lock.
4434 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4435 * netif_wake_queue().
4438 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4440 struct bnx2 *bp = netdev_priv(dev);
4443 struct sw_bd *tx_buf;
4444 u32 len, vlan_tag_flags, last_frag, mss;
4445 u16 prod, ring_prod;
/* Should not happen: queue is awake only when there is room. */
4448 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4449 netif_stop_queue(dev);
4450 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4453 return NETDEV_TX_BUSY;
4455 len = skb_headlen(skb);
4457 ring_prod = TX_RING_IDX(prod);
4460 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4461 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4464 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4466 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
/* TSO path: fix up IP/TCP headers (pseudo checksum, tot_len) and encode
 * option lengths into the BD flags. */
4469 if ((mss = skb_shinfo(skb)->gso_size) &&
4470 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4471 u32 tcp_opt_len, ip_tcp_len;
4473 if (skb_header_cloned(skb) &&
4474 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4476 return NETDEV_TX_OK;
4479 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4480 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4483 if (skb->h.th->doff > 5) {
4484 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4486 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4488 skb->nh.iph->check = 0;
4489 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4491 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4495 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4496 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4497 (tcp_opt_len >> 2)) << 8;
/* Map the linear head into the first BD (START flag set). */
4506 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4508 tx_buf = &bp->tx_buf_ring[ring_prod];
4510 pci_unmap_addr_set(tx_buf, mapping, mapping);
4512 txbd = &bp->tx_desc_ring[ring_prod];
4514 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4515 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4516 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4517 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
/* One BD per page fragment; last one gets the END flag. */
4519 last_frag = skb_shinfo(skb)->nr_frags;
4521 for (i = 0; i < last_frag; i++) {
4522 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4524 prod = NEXT_TX_BD(prod);
4525 ring_prod = TX_RING_IDX(prod);
4526 txbd = &bp->tx_desc_ring[ring_prod];
4529 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4530 len, PCI_DMA_TODEVICE);
4531 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4534 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4535 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4536 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4537 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4540 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
/* Doorbell: publish new producer index and cumulative byte count. */
4542 prod = NEXT_TX_BD(prod);
4543 bp->tx_prod_bseq += skb->len;
4545 REG_WR16(bp, bp->tx_bidx_addr, prod);
4546 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4551 dev->trans_start = jiffies;
/* Stop the queue when nearly full; re-wake if bnx2_tx_int() freed space
 * between the check and the stop (race window closed by re-check). */
4553 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4554 netif_stop_queue(dev);
4555 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4556 netif_wake_queue(dev);
4559 return NETDEV_TX_OK;
/* ndo stop handler: wait for any in-flight reset_task (cannot use
 * flush_scheduled_work() while holding rtnl -- see comment), stop the
 * netif and timer, reset the chip with a WoL-appropriate firmware code,
 * release the IRQ/MSI, and drop to D3hot. */
4562 /* Called with rtnl_lock */
4564 bnx2_close(struct net_device *dev)
4566 struct bnx2 *bp = netdev_priv(dev);
4569 /* Calling flush_scheduled_work() may deadlock because
4570 * linkwatch_event() may be on the workqueue and it will try to get
4571 * the rtnl_lock which we are holding.
4573 while (bp->in_reset_task)
4576 bnx2_netif_stop(bp);
4577 del_timer_sync(&bp->timer);
/* Pick the unload message for the bootcode based on WoL capability;
 * the middle branch condition (bp->wol, presumably) was dropped by the
 * extraction -- TODO confirm. */
4578 if (bp->flags & NO_WOL_FLAG)
4579 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4581 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4583 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4584 bnx2_reset_chip(bp, reset_code);
4585 free_irq(bp->pdev->irq, dev);
4586 if (bp->flags & USING_MSI_FLAG) {
4587 pci_disable_msi(bp->pdev);
4588 bp->flags &= ~USING_MSI_FLAG;
4593 netif_carrier_off(bp->dev);
4594 bnx2_set_power_state(bp, PCI_D3hot);
/* Helper macros: fold a hardware counter's _hi/_lo 32-bit halves into an
 * unsigned long.  On 64-bit builds both halves are combined; on 32-bit
 * only the low word is used (the 32-bit variant's body was dropped by
 * the extraction -- presumably just (ctr##_lo)). */
4598 #define GET_NET_STATS64(ctr) \
4599 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4600 (unsigned long) (ctr##_lo)
4602 #define GET_NET_STATS32(ctr) \
4605 #if (BITS_PER_LONG == 64)
4606 #define GET_NET_STATS GET_NET_STATS64
4608 #define GET_NET_STATS GET_NET_STATS32
/* ndo get_stats handler: translate the chip's statistics block into the
 * cached struct net_device_stats.  Returns zeroed/stale stats if the
 * stats block has not been allocated yet.  tx_carrier_errors is forced
 * to 0 on 5706 and 5708 A0 because of a counter erratum. */
4611 static struct net_device_stats *
4612 bnx2_get_stats(struct net_device *dev)
4614 struct bnx2 *bp = netdev_priv(dev);
4615 struct statistics_block *stats_blk = bp->stats_blk;
4616 struct net_device_stats *net_stats = &bp->net_stats;
4618 if (bp->stats_blk == NULL) {
4621 net_stats->rx_packets =
4622 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4623 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4624 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4626 net_stats->tx_packets =
4627 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4628 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4629 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4631 net_stats->rx_bytes =
4632 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4634 net_stats->tx_bytes =
4635 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4637 net_stats->multicast =
4638 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4640 net_stats->collisions =
4641 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4643 net_stats->rx_length_errors =
4644 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4645 stats_blk->stat_EtherStatsOverrsizePkts);
4647 net_stats->rx_over_errors =
4648 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4650 net_stats->rx_frame_errors =
4651 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4653 net_stats->rx_crc_errors =
4654 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4656 net_stats->rx_errors = net_stats->rx_length_errors +
4657 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4658 net_stats->rx_crc_errors;
4660 net_stats->tx_aborted_errors =
4661 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4662 stats_blk->stat_Dot3StatsLateCollisions);
/* Erratum: carrier-sense counter unreliable on these chips. */
4664 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4665 (CHIP_ID(bp) == CHIP_ID_5708_A0))
4666 net_stats->tx_carrier_errors = 0;
4668 net_stats->tx_carrier_errors =
4670 stats_blk->stat_Dot3StatsCarrierSenseErrors;
4673 net_stats->tx_errors =
4675 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
4677 net_stats->tx_aborted_errors +
4678 net_stats->tx_carrier_errors;
4680 net_stats->rx_missed_errors =
4681 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
4682 stats_blk->stat_FwRxDrop);
/* ethtool get_settings: report supported/advertised modes (fibre for
 * SerDes PHYs, full copper set for TP), autoneg state, and -- only while
 * carrier is up -- current speed/duplex. */
4687 /* All ethtool functions called with rtnl_lock */
4690 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4692 struct bnx2 *bp = netdev_priv(dev);
4694 cmd->supported = SUPPORTED_Autoneg;
4695 if (bp->phy_flags & PHY_SERDES_FLAG) {
4696 cmd->supported |= SUPPORTED_1000baseT_Full |
4699 cmd->port = PORT_FIBRE;
4702 cmd->supported |= SUPPORTED_10baseT_Half |
4703 SUPPORTED_10baseT_Full |
4704 SUPPORTED_100baseT_Half |
4705 SUPPORTED_100baseT_Full |
4706 SUPPORTED_1000baseT_Full |
4709 cmd->port = PORT_TP;
4712 cmd->advertising = bp->advertising;
4714 if (bp->autoneg & AUTONEG_SPEED) {
4715 cmd->autoneg = AUTONEG_ENABLE;
4718 cmd->autoneg = AUTONEG_DISABLE;
4721 if (netif_carrier_ok(dev)) {
4722 cmd->speed = bp->line_speed;
4723 cmd->duplex = bp->duplex;
4730 cmd->transceiver = XCVR_INTERNAL;
4731 cmd->phy_address = bp->phy_addr;
/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * against PHY type (SerDes allows only 1000/2500 full duplex; 2500 only
 * with the 2.5G-capable flag), stage the result in locals, then commit
 * to bp->* and renegotiate under phy_lock.  The -EINVAL returns and the
 * bnx2_setup_phy() call are on dropped lines -- TODO confirm. */
4737 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4739 struct bnx2 *bp = netdev_priv(dev);
4740 u8 autoneg = bp->autoneg;
4741 u8 req_duplex = bp->req_duplex;
4742 u16 req_line_speed = bp->req_line_speed;
4743 u32 advertising = bp->advertising;
4745 if (cmd->autoneg == AUTONEG_ENABLE) {
4746 autoneg |= AUTONEG_SPEED;
4748 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
4750 /* allow advertising 1 speed */
4751 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
4752 (cmd->advertising == ADVERTISED_10baseT_Full) ||
4753 (cmd->advertising == ADVERTISED_100baseT_Half) ||
4754 (cmd->advertising == ADVERTISED_100baseT_Full)) {
/* 10/100 advertising rejected on SerDes PHYs. */
4756 if (bp->phy_flags & PHY_SERDES_FLAG)
4759 advertising = cmd->advertising;
4762 else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
4763 advertising = cmd->advertising;
4765 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
/* Default: advertise everything the PHY type supports. */
4769 if (bp->phy_flags & PHY_SERDES_FLAG) {
4770 advertising = ETHTOOL_ALL_FIBRE_SPEED;
4773 advertising = ETHTOOL_ALL_COPPER_SPEED;
4776 advertising |= ADVERTISED_Autoneg;
/* Forced-speed path. */
4779 if (bp->phy_flags & PHY_SERDES_FLAG) {
4780 if ((cmd->speed != SPEED_1000 &&
4781 cmd->speed != SPEED_2500) ||
4782 (cmd->duplex != DUPLEX_FULL))
4785 if (cmd->speed == SPEED_2500 &&
4786 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
4789 else if (cmd->speed == SPEED_1000) {
4792 autoneg &= ~AUTONEG_SPEED;
4793 req_line_speed = cmd->speed;
4794 req_duplex = cmd->duplex;
/* Commit staged values and renegotiate. */
4798 bp->autoneg = autoneg;
4799 bp->advertising = advertising;
4800 bp->req_line_speed = req_line_speed;
4801 bp->req_duplex = req_duplex;
4803 spin_lock_bh(&bp->phy_lock);
4807 spin_unlock_bh(&bp->phy_lock);
/* ethtool get_drvinfo: driver name/version/bus id, plus the bootcode
 * version formatted as "X.Y.Z" from the packed bp->fw_ver byte fields.
 * Note each byte is rendered as a single '0'+n character, so components
 * above 9 would print wrong -- acceptable here given firmware numbering. */
4813 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
4815 struct bnx2 *bp = netdev_priv(dev);
4817 strcpy(info->driver, DRV_MODULE_NAME);
4818 strcpy(info->version, DRV_MODULE_VERSION);
4819 strcpy(info->bus_info, pci_name(bp->pdev));
4820 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
4821 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
4822 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
4823 info->fw_version[1] = info->fw_version[3] = '.';
4824 info->fw_version[5] = 0;
/* ethtool get_regs_len: fixed 32 KiB register-dump window. */
4827 #define BNX2_REGDUMP_LEN (32 * 1024)
4830 bnx2_get_regs_len(struct net_device *dev)
4832 return BNX2_REGDUMP_LEN;
/* ethtool get_regs: dump readable register ranges into the 32 KiB buffer.
 * reg_boundaries[] holds alternating start/end offsets of valid windows;
 * the walk reads each window and skips to the next start, keeping the
 * output pointer aligned with the register offset so gaps stay zeroed
 * (buffer memset to 0 up front).  No dump while the device is down. */
4836 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4838 u32 *p = _p, i, offset;
4840 struct bnx2 *bp = netdev_priv(dev);
4841 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4842 0x0800, 0x0880, 0x0c00, 0x0c10,
4843 0x0c30, 0x0d08, 0x1000, 0x101c,
4844 0x1040, 0x1048, 0x1080, 0x10a4,
4845 0x1400, 0x1490, 0x1498, 0x14f0,
4846 0x1500, 0x155c, 0x1580, 0x15dc,
4847 0x1600, 0x1658, 0x1680, 0x16d8,
4848 0x1800, 0x1820, 0x1840, 0x1854,
4849 0x1880, 0x1894, 0x1900, 0x1984,
4850 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4851 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4852 0x2000, 0x2030, 0x23c0, 0x2400,
4853 0x2800, 0x2820, 0x2830, 0x2850,
4854 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4855 0x3c00, 0x3c94, 0x4000, 0x4010,
4856 0x4080, 0x4090, 0x43c0, 0x4458,
4857 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4858 0x4fc0, 0x5010, 0x53c0, 0x5444,
4859 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4860 0x5fc0, 0x6000, 0x6400, 0x6428,
4861 0x6800, 0x6848, 0x684c, 0x6860,
4862 0x6888, 0x6910, 0x8000 };
4866 memset(p, 0, BNX2_REGDUMP_LEN);
4868 if (!netif_running(bp->dev))
4872 offset = reg_boundaries[0];
4874 while (offset < BNX2_REGDUMP_LEN) {
4875 *p++ = REG_RD(bp, offset);
/* Reached the end of a window: jump to the next window's start and
 * advance the output pointer by the same amount (i increment lost in
 * extraction). */
4877 if (offset == reg_boundaries[i + 1]) {
4878 offset = reg_boundaries[i + 2];
4879 p = (u32 *) (orig_p + offset);
/* ethtool get_wol: magic-packet wake only, and only when the NO_WOL flag
 * is clear; the active wolopts presumably also depends on bp->wol (line
 * dropped by extraction -- TODO confirm). */
4886 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4888 struct bnx2 *bp = netdev_priv(dev);
4890 if (bp->flags & NO_WOL_FLAG) {
4895 wol->supported = WAKE_MAGIC;
4897 wol->wolopts = WAKE_MAGIC;
4901 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: only WAKE_MAGIC may be requested, and only on parts
 * that support WoL (bp->wol update and returns are on dropped lines). */
4905 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4907 struct bnx2 *bp = netdev_priv(dev);
4909 if (wol->wolopts & ~WAKE_MAGIC)
4912 if (wol->wolopts & WAKE_MAGIC) {
4913 if (bp->flags & NO_WOL_FLAG)
/* ethtool nway_reset: restart autonegotiation.  Fails unless autoneg is
 * enabled.  For SerDes PHYs the link is first forced down via BMCR
 * loopback (so the peer notices), with a sleep between the two phy_lock
 * sections (msleep on a dropped line), and the SerDes AN watchdog timer
 * is armed.  Finally BMCR is rewritten with ANRESTART|ANENABLE. */
4925 bnx2_nway_reset(struct net_device *dev)
4927 struct bnx2 *bp = netdev_priv(dev);
4930 if (!(bp->autoneg & AUTONEG_SPEED)) {
4934 spin_lock_bh(&bp->phy_lock);
4936 /* Force a link down visible on the other side */
4937 if (bp->phy_flags & PHY_SERDES_FLAG) {
4938 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
4939 spin_unlock_bh(&bp->phy_lock);
4943 spin_lock_bh(&bp->phy_lock);
4945 bp->current_interval = SERDES_AN_TIMEOUT;
4946 bp->serdes_an_pending = 1;
4947 mod_timer(&bp->timer, jiffies + bp->current_interval);
4950 bnx2_read_phy(bp, MII_BMCR, &bmcr);
4951 bmcr &= ~BMCR_LOOPBACK;
4952 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
4954 spin_unlock_bh(&bp->phy_lock);
/* ethtool get_eeprom_len: NVRAM size, or 0 when no flash was detected
 * (the 'return 0' branch is on a dropped line). */
4960 bnx2_get_eeprom_len(struct net_device *dev)
4962 struct bnx2 *bp = netdev_priv(dev);
4964 if (bp->flash_info == NULL)
4967 return (int) bp->flash_size;
/* ethtool get_eeprom: straight pass-through to bnx2_nvram_read();
 * offset/len already validated by the ethtool core. */
4971 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4974 struct bnx2 *bp = netdev_priv(dev);
4977 /* parameters already validated in ethtool_get_eeprom */
4979 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
/* ethtool set_eeprom: straight pass-through to bnx2_nvram_write();
 * offset/len already validated by the ethtool core. */
4985 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4988 struct bnx2 *bp = netdev_priv(dev);
4991 /* parameters already validated in ethtool_set_eeprom */
4993 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
/* ethtool get_coalesce: copy the current rx/tx tick and quick-consumer
 * trip settings (normal and in-IRQ variants) plus the stats block
 * coalescing interval out of the softc. */
4999 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5001 struct bnx2 *bp = netdev_priv(dev);
5003 memset(coal, 0, sizeof(struct ethtool_coalesce));
5005 coal->rx_coalesce_usecs = bp->rx_ticks;
5006 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5007 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5008 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5010 coal->tx_coalesce_usecs = bp->tx_ticks;
5011 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5012 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5013 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5015 coal->stats_block_coalesce_usecs = bp->stats_ticks;
/* ethtool set_coalesce: clamp each requested value to its hardware field
 * width (ticks to 10 bits / 0x3ff, frame trips to 8 bits / 0xff, stats
 * interval to a multiple of 256 capped at 0xffff00), then bounce the
 * interface so bnx2_init_nic() (call on a dropped line) reprograms the
 * host-coalescing block. */
5021 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5023 struct bnx2 *bp = netdev_priv(dev);
5025 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5026 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5028 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5029 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5031 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5032 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5034 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5035 if (bp->rx_quick_cons_trip_int > 0xff)
5036 bp->rx_quick_cons_trip_int = 0xff;
5038 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5039 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5041 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5042 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5044 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5045 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5047 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5048 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5051 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5052 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5053 bp->stats_ticks &= 0xffff00;
5055 if (netif_running(bp->dev)) {
5056 bnx2_netif_stop(bp);
5058 bnx2_netif_start(bp);
/* ethtool get_ringparam: report max and current rx/tx ring sizes; no
 * mini or jumbo rings on this hardware. */
5065 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5067 struct bnx2 *bp = netdev_priv(dev);
5069 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5070 ering->rx_mini_max_pending = 0;
5071 ering->rx_jumbo_max_pending = 0;
5073 ering->rx_pending = bp->rx_ring_size;
5074 ering->rx_mini_pending = 0;
5075 ering->rx_jumbo_pending = 0;
5077 ering->tx_max_pending = MAX_TX_DESC_CNT;
5078 ering->tx_pending = bp->tx_ring_size;
/* ethtool set_ringparam: bounds-check the requested sizes (tx must leave
 * room for MAX_SKB_FRAGS + 1 descriptors per packet), then -- if the
 * device is up -- stop it, reset the chip, free and re-allocate ring
 * memory (free call on a dropped line), re-init, and restart. */
5082 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5084 struct bnx2 *bp = netdev_priv(dev);
5086 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5087 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5088 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5092 if (netif_running(bp->dev)) {
5093 bnx2_netif_stop(bp);
5094 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5099 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5100 bp->tx_ring_size = ering->tx_pending;
5102 if (netif_running(bp->dev)) {
5105 rc = bnx2_alloc_mem(bp);
5109 bnx2_netif_start(bp);
/* ethtool get_pauseparam: report flow-control autoneg and the currently
 * negotiated rx/tx pause state as booleans. */
5116 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5118 struct bnx2 *bp = netdev_priv(dev);
5120 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5121 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5122 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
/* ethtool set_pauseparam: rebuild req_flow_ctrl from the request, toggle
 * the AUTONEG_FLOW_CTRL bit, and apply under phy_lock (the setup-phy
 * call between lock/unlock is on a dropped line -- TODO confirm). */
5126 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5128 struct bnx2 *bp = netdev_priv(dev);
5130 bp->req_flow_ctrl = 0;
5131 if (epause->rx_pause)
5132 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5133 if (epause->tx_pause)
5134 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5136 if (epause->autoneg) {
5137 bp->autoneg |= AUTONEG_FLOW_CTRL;
5140 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5143 spin_lock_bh(&bp->phy_lock);
5147 spin_unlock_bh(&bp->phy_lock);
/* Small ethtool accessors: get/set the rx-checksum flag (bodies largely
 * on dropped lines -- presumably bp->rx_csum) and toggle TSO/TSO-ECN
 * feature bits on the net device. */
5153 bnx2_get_rx_csum(struct net_device *dev)
5155 struct bnx2 *bp = netdev_priv(dev);
5161 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5163 struct bnx2 *bp = netdev_priv(dev);
5170 bnx2_set_tso(struct net_device *dev, u32 data)
5173 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5175 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
/* ethtool statistics name table: 46 strings, index-matched one-to-one
 * with bnx2_stats_offset_arr[] and the per-chip stats_len arrays below.
 * A few entries appear to be on dropped lines (rx_bytes, tx_bytes,
 * rx_fragments etc. -- the gaps in the embedded numbering). */
5179 #define BNX2_NUM_STATS 46
5182 char string[ETH_GSTRING_LEN];
5183 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5185 { "rx_error_bytes" },
5187 { "tx_error_bytes" },
5188 { "rx_ucast_packets" },
5189 { "rx_mcast_packets" },
5190 { "rx_bcast_packets" },
5191 { "tx_ucast_packets" },
5192 { "tx_mcast_packets" },
5193 { "tx_bcast_packets" },
5194 { "tx_mac_errors" },
5195 { "tx_carrier_errors" },
5196 { "rx_crc_errors" },
5197 { "rx_align_errors" },
5198 { "tx_single_collisions" },
5199 { "tx_multi_collisions" },
5201 { "tx_excess_collisions" },
5202 { "tx_late_collisions" },
5203 { "tx_total_collisions" },
5206 { "rx_undersize_packets" },
5207 { "rx_oversize_packets" },
5208 { "rx_64_byte_packets" },
5209 { "rx_65_to_127_byte_packets" },
5210 { "rx_128_to_255_byte_packets" },
5211 { "rx_256_to_511_byte_packets" },
5212 { "rx_512_to_1023_byte_packets" },
5213 { "rx_1024_to_1522_byte_packets" },
5214 { "rx_1523_to_9022_byte_packets" },
5215 { "tx_64_byte_packets" },
5216 { "tx_65_to_127_byte_packets" },
5217 { "tx_128_to_255_byte_packets" },
5218 { "tx_256_to_511_byte_packets" },
5219 { "tx_512_to_1023_byte_packets" },
5220 { "tx_1024_to_1522_byte_packets" },
5221 { "tx_1523_to_9022_byte_packets" },
5222 { "rx_xon_frames" },
5223 { "rx_xoff_frames" },
5224 { "tx_xon_frames" },
5225 { "tx_xoff_frames" },
5226 { "rx_mac_ctrl_frames" },
5227 { "rx_filtered_packets" },
5229 { "rx_fw_discards" },
/* Offsets (in 32-bit words) of each counter inside struct
 * statistics_block; index-matched with bnx2_stats_str_arr[].  For 8-byte
 * counters the offset points at the _hi word and the _lo word follows. */
5232 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5234 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5235 STATS_OFFSET32(stat_IfHCInOctets_hi),
5236 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5237 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5238 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5239 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5240 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5241 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5242 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5243 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5244 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5245 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5246 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5247 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5248 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5249 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5250 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5251 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5252 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5253 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5254 STATS_OFFSET32(stat_EtherStatsCollisions),
5255 STATS_OFFSET32(stat_EtherStatsFragments),
5256 STATS_OFFSET32(stat_EtherStatsJabbers),
5257 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5258 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5259 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5260 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5261 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5262 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5263 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5264 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5265 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5266 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5267 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5268 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5269 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5270 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5271 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5272 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5273 STATS_OFFSET32(stat_XonPauseFramesReceived),
5274 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5275 STATS_OFFSET32(stat_OutXonSent),
5276 STATS_OFFSET32(stat_OutXoffSent),
5277 STATS_OFFSET32(stat_MacControlFramesReceived),
5278 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5279 STATS_OFFSET32(stat_IfInMBUFDiscards),
5280 STATS_OFFSET32(stat_FwRxDrop),
/* Per-chip counter widths (bytes) for each stats index: 8 = 64-bit
 * counter, 4 = 32-bit, 0 = skip (errata).  5706 additionally skips
 * carrier-sense errors; final rows of each array were dropped by the
 * extraction. */
5283 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5284 * skipped because of errata.
5286 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5287 8,0,8,8,8,8,8,8,8,8,
5288 4,0,4,4,4,4,4,4,4,4,
5289 4,4,4,4,4,4,4,4,4,4,
5290 4,4,4,4,4,4,4,4,4,4,
5294 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5295 8,0,8,8,8,8,8,8,8,8,
5296 4,4,4,4,4,4,4,4,4,4,
5297 4,4,4,4,4,4,4,4,4,4,
5298 4,4,4,4,4,4,4,4,4,4,
/* ethtool self-test name table: three offline tests (register, memory,
 * loopback) and three online tests (nvram, interrupt, link), in the
 * order bnx2_self_test() fills buf[]. */
5302 #define BNX2_NUM_TESTS 6
5305 char string[ETH_GSTRING_LEN];
5306 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5307 { "register_test (offline)" },
5308 { "memory_test (offline)" },
5309 { "loopback_test (offline)" },
5310 { "nvram_test (online)" },
5311 { "interrupt_test (online)" },
5312 { "link_test (online)" },
/* ethtool self_test_count: fixed number of self-tests. */
5316 bnx2_self_test_count(struct net_device *dev)
5318 return BNX2_NUM_TESTS;
/* ethtool self_test: run offline tests (register/memory/loopback, after
 * quiescing and putting the chip in DIAG mode) when requested, then the
 * online tests (nvram/interrupt/link).  Each failure sets buf[i] = 1 and
 * ETH_TEST_FL_FAILED.  After the offline phase the NIC is re-inited and
 * the code waits up to ~6 s for link before the link test. */
5322 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5324 struct bnx2 *bp = netdev_priv(dev);
5326 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5327 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5330 bnx2_netif_stop(bp);
5331 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5334 if (bnx2_test_registers(bp) != 0) {
5336 etest->flags |= ETH_TEST_FL_FAILED;
5338 if (bnx2_test_memory(bp) != 0) {
5340 etest->flags |= ETH_TEST_FL_FAILED;
5342 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5343 etest->flags |= ETH_TEST_FL_FAILED;
5345 if (!netif_running(bp->dev)) {
5346 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5350 bnx2_netif_start(bp);
5353 /* wait for link up */
5354 for (i = 0; i < 7; i++) {
5357 msleep_interruptible(1000);
5361 if (bnx2_test_nvram(bp) != 0) {
5363 etest->flags |= ETH_TEST_FL_FAILED;
5365 if (bnx2_test_intr(bp) != 0) {
5367 etest->flags |= ETH_TEST_FL_FAILED;
5370 if (bnx2_test_link(bp) != 0) {
5372 etest->flags |= ETH_TEST_FL_FAILED;
/* ethtool get_strings: copy the stats or self-test name table depending
 * on the requested string set (ETH_SS_STATS / ETH_SS_TEST case labels
 * are on dropped lines). */
5378 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5380 switch (stringset) {
5382 memcpy(buf, bnx2_stats_str_arr,
5383 sizeof(bnx2_stats_str_arr));
5386 memcpy(buf, bnx2_tests_str_arr,
5387 sizeof(bnx2_tests_str_arr));
/* ethtool get_stats_count: fixed number of statistics entries. */
5393 bnx2_get_stats_count(struct net_device *dev)
5395 return BNX2_NUM_STATS;
/* ethtool get_ethtool_stats: walk the offset table and copy each counter
 * out of the hardware stats block as a u64.  The per-chip length array
 * selects 4- vs 8-byte counters and marks errata counters (len 0) to be
 * skipped/zeroed.  All-zero output if the stats block isn't allocated. */
5399 bnx2_get_ethtool_stats(struct net_device *dev,
5400 struct ethtool_stats *stats, u64 *buf)
5402 struct bnx2 *bp = netdev_priv(dev);
5404 u32 *hw_stats = (u32 *) bp->stats_blk;
5405 u8 *stats_len_arr = NULL;
5407 if (hw_stats == NULL) {
5408 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
/* Early 5706/5708 steppings use the table with extra skipped counters. */
5412 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5413 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5414 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5415 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5416 stats_len_arr = bnx2_5706_stats_len_arr;
5418 stats_len_arr = bnx2_5708_stats_len_arr;
5420 for (i = 0; i < BNX2_NUM_STATS; i++) {
5421 if (stats_len_arr[i] == 0) {
5422 /* skip this counter */
5426 if (stats_len_arr[i] == 4) {
5427 /* 4-byte counter */
5429 *(hw_stats + bnx2_stats_offset_arr[i]);
5432 /* 8-byte counter */
5433 buf[i] = (((u64) *(hw_stats +
5434 bnx2_stats_offset_arr[i])) << 32) +
5435 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
/* ethtool phys_id: blink the port LED for identification.  Saves MISC_CFG,
 * forces MAC LED mode, then alternates the LED via override bits every
 * half second (2 toggles per requested second), bailing early on a
 * signal; finally restores LED register and MISC_CFG.  The data==0 /
 * parity handling appears to be on dropped lines. */
5440 bnx2_phys_id(struct net_device *dev, u32 data)
5442 struct bnx2 *bp = netdev_priv(dev);
5449 save = REG_RD(bp, BNX2_MISC_CFG);
5450 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5452 for (i = 0; i < (data * 2); i++) {
5454 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5457 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5458 BNX2_EMAC_LED_1000MB_OVERRIDE |
5459 BNX2_EMAC_LED_100MB_OVERRIDE |
5460 BNX2_EMAC_LED_10MB_OVERRIDE |
5461 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5462 BNX2_EMAC_LED_TRAFFIC);
5464 msleep_interruptible(500);
5465 if (signal_pending(current))
5468 REG_WR(bp, BNX2_EMAC_LED, 0);
5469 REG_WR(bp, BNX2_MISC_CFG, save);
/* ethtool operations table wiring the handlers above into the core;
 * generic ethtool_op_* helpers cover link, tx-csum, sg, tso-get and
 * perm_addr. */
5473 static const struct ethtool_ops bnx2_ethtool_ops = {
5474 .get_settings = bnx2_get_settings,
5475 .set_settings = bnx2_set_settings,
5476 .get_drvinfo = bnx2_get_drvinfo,
5477 .get_regs_len = bnx2_get_regs_len,
5478 .get_regs = bnx2_get_regs,
5479 .get_wol = bnx2_get_wol,
5480 .set_wol = bnx2_set_wol,
5481 .nway_reset = bnx2_nway_reset,
5482 .get_link = ethtool_op_get_link,
5483 .get_eeprom_len = bnx2_get_eeprom_len,
5484 .get_eeprom = bnx2_get_eeprom,
5485 .set_eeprom = bnx2_set_eeprom,
5486 .get_coalesce = bnx2_get_coalesce,
5487 .set_coalesce = bnx2_set_coalesce,
5488 .get_ringparam = bnx2_get_ringparam,
5489 .set_ringparam = bnx2_set_ringparam,
5490 .get_pauseparam = bnx2_get_pauseparam,
5491 .set_pauseparam = bnx2_set_pauseparam,
5492 .get_rx_csum = bnx2_get_rx_csum,
5493 .set_rx_csum = bnx2_set_rx_csum,
5494 .get_tx_csum = ethtool_op_get_tx_csum,
5495 .set_tx_csum = ethtool_op_set_tx_csum,
5496 .get_sg = ethtool_op_get_sg,
5497 .set_sg = ethtool_op_set_sg,
5499 .get_tso = ethtool_op_get_tso,
5500 .set_tso = bnx2_set_tso,
5502 .self_test_count = bnx2_self_test_count,
5503 .self_test = bnx2_self_test,
5504 .get_strings = bnx2_get_strings,
5505 .phys_id = bnx2_phys_id,
5506 .get_stats_count = bnx2_get_stats_count,
5507 .get_ethtool_stats = bnx2_get_ethtool_stats,
5508 .get_perm_addr = ethtool_op_get_perm_addr,
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG, case
 * labels on dropped lines): report the PHY address, and read/write a PHY
 * register under phy_lock; writes require CAP_NET_ADMIN. */
5511 /* Called with rtnl_lock */
5513 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5515 struct mii_ioctl_data *data = if_mii(ifr);
5516 struct bnx2 *bp = netdev_priv(dev);
5521 data->phy_id = bp->phy_addr;
5527 spin_lock_bh(&bp->phy_lock);
5528 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5529 spin_unlock_bh(&bp->phy_lock);
5531 data->val_out = mii_regval;
5537 if (!capable(CAP_NET_ADMIN))
5540 spin_lock_bh(&bp->phy_lock);
5541 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5542 spin_unlock_bh(&bp->phy_lock);
/* ndo set_mac_address: validate, copy into dev->dev_addr, and program
 * the MAC registers immediately if the interface is up. */
5553 /* Called with rtnl_lock */
5555 bnx2_change_mac_addr(struct net_device *dev, void *p)
5557 struct sockaddr *addr = p;
5558 struct bnx2 *bp = netdev_priv(dev);
5560 if (!is_valid_ether_addr(addr->sa_data))
5563 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5564 if (netif_running(dev))
5565 bnx2_set_mac_addr(bp);
/* ndo change_mtu: bounds-check against min packet and max jumbo sizes,
 * store the new MTU (assignment on a dropped line), and bounce the
 * interface so rings/buffers are resized. */
5570 /* Called with rtnl_lock */
5572 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5574 struct bnx2 *bp = netdev_priv(dev);
5576 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5577 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5581 if (netif_running(dev)) {
5582 bnx2_netif_stop(bp);
5586 bnx2_netif_start(bp);
/* netpoll controller: invoke the interrupt handler with the device IRQ
 * disabled so netconsole/kgdb can drain the rings without interrupts. */
5591 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5593 poll_bnx2(struct net_device *dev)
5595 struct bnx2 *bp = netdev_priv(dev);
5597 disable_irq(bp->pdev->irq);
5598 bnx2_interrupt(bp->pdev->irq, dev);
5599 enable_irq(bp->pdev->irq);
5603 static int __devinit
5604 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5607 unsigned long mem_len;
5611 SET_MODULE_OWNER(dev);
5612 SET_NETDEV_DEV(dev, &pdev->dev);
5613 bp = netdev_priv(dev);
5618 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5619 rc = pci_enable_device(pdev);
5621 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
5625 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5627 "Cannot find PCI device base address, aborting.\n");
5629 goto err_out_disable;
5632 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5634 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
5635 goto err_out_disable;
5638 pci_set_master(pdev);
5640 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5641 if (bp->pm_cap == 0) {
5643 "Cannot find power management capability, aborting.\n");
5645 goto err_out_release;
5648 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5649 bp->flags |= USING_DAC_FLAG;
5650 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
5652 "pci_set_consistent_dma_mask failed, aborting.\n");
5654 goto err_out_release;
5657 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
5658 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
5660 goto err_out_release;
5666 spin_lock_init(&bp->phy_lock);
5667 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5669 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5670 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
5671 dev->mem_end = dev->mem_start + mem_len;
5672 dev->irq = pdev->irq;
5674 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5677 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
5679 goto err_out_release;
5682 /* Configure byte swap and enable write to the reg_window registers.
5683 * Rely on CPU to do target byte swapping on big endian systems
5684 * The chip's target access swapping will not swap all accesses
5686 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5687 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5688 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5690 bnx2_set_power_state(bp, PCI_D0);
5692 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5694 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
5695 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5696 if (bp->pcix_cap == 0) {
5698 "Cannot find PCIX capability, aborting.\n");
5704 /* Get bus information. */
5705 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5706 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5709 bp->flags |= PCIX_FLAG;
5711 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
5713 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5715 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5716 bp->bus_speed_mhz = 133;
5719 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5720 bp->bus_speed_mhz = 100;
5723 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5724 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5725 bp->bus_speed_mhz = 66;
5728 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5729 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5730 bp->bus_speed_mhz = 50;
5733 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5734 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5735 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5736 bp->bus_speed_mhz = 33;
5741 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5742 bp->bus_speed_mhz = 66;
5744 bp->bus_speed_mhz = 33;
5747 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5748 bp->flags |= PCI_32BIT_FLAG;
5750 /* 5706A0 may falsely detect SERR and PERR. */
5751 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5752 reg = REG_RD(bp, PCI_COMMAND);
5753 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5754 REG_WR(bp, PCI_COMMAND, reg);
5756 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5757 !(bp->flags & PCIX_FLAG)) {
5760 "5706 A1 can only be used in a PCIX bus, aborting.\n");
5764 bnx2_init_nvram(bp);
5766 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5768 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5769 BNX2_SHM_HDR_SIGNATURE_SIG)
5770 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5772 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5774 /* Get the permanent MAC address. First we need to make sure the
5775 * firmware is actually running.
5777 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
5779 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5780 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
5781 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
5786 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
5788 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
5789 bp->mac_addr[0] = (u8) (reg >> 8);
5790 bp->mac_addr[1] = (u8) reg;
5792 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
5793 bp->mac_addr[2] = (u8) (reg >> 24);
5794 bp->mac_addr[3] = (u8) (reg >> 16);
5795 bp->mac_addr[4] = (u8) (reg >> 8);
5796 bp->mac_addr[5] = (u8) reg;
5798 bp->tx_ring_size = MAX_TX_DESC_CNT;
5799 bnx2_set_rx_ring_size(bp, 255);
5803 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5805 bp->tx_quick_cons_trip_int = 20;
5806 bp->tx_quick_cons_trip = 20;
5807 bp->tx_ticks_int = 80;
5810 bp->rx_quick_cons_trip_int = 6;
5811 bp->rx_quick_cons_trip = 6;
5812 bp->rx_ticks_int = 18;
5815 bp->stats_ticks = 1000000 & 0xffff00;
5817 bp->timer_interval = HZ;
5818 bp->current_interval = HZ;
5822 /* Disable WOL support if we are running on a SERDES chip. */
5823 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5824 bp->phy_flags |= PHY_SERDES_FLAG;
5825 bp->flags |= NO_WOL_FLAG;
5826 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5828 reg = REG_RD_IND(bp, bp->shmem_base +
5829 BNX2_SHARED_HW_CFG_CONFIG);
5830 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5831 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5835 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5836 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5837 (CHIP_ID(bp) == CHIP_ID_5708_B1))
5838 bp->flags |= NO_WOL_FLAG;
5840 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5841 bp->tx_quick_cons_trip_int =
5842 bp->tx_quick_cons_trip;
5843 bp->tx_ticks_int = bp->tx_ticks;
5844 bp->rx_quick_cons_trip_int =
5845 bp->rx_quick_cons_trip;
5846 bp->rx_ticks_int = bp->rx_ticks;
5847 bp->comp_prod_trip_int = bp->comp_prod_trip;
5848 bp->com_ticks_int = bp->com_ticks;
5849 bp->cmd_ticks_int = bp->cmd_ticks;
5852 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5854 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5855 * with byte enables disabled on the unused 32-bit word. This is legal
5856 * but causes problems on the AMD 8132 which will eventually stop
5857 * responding after a while.
5859 * AMD believes this incompatibility is unique to the 5706, and
5860 * prefers to locally disable MSI rather than globally disabling it
5861 * using pci_msi_quirk.
5863 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5864 struct pci_dev *amd_8132 = NULL;
5866 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5867 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5871 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5872 if (rev >= 0x10 && rev <= 0x13) {
5874 pci_dev_put(amd_8132);
5880 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5881 bp->req_line_speed = 0;
5882 if (bp->phy_flags & PHY_SERDES_FLAG) {
5883 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
5885 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
5886 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5887 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5889 bp->req_line_speed = bp->line_speed = SPEED_1000;
5890 bp->req_duplex = DUPLEX_FULL;
5894 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5897 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5899 init_timer(&bp->timer);
5900 bp->timer.expires = RUN_AT(bp->timer_interval);
5901 bp->timer.data = (unsigned long) bp;
5902 bp->timer.function = bnx2_timer;
5908 iounmap(bp->regview);
5913 pci_release_regions(pdev);
5916 pci_disable_device(pdev);
5917 pci_set_drvdata(pdev, NULL);
/*
 * bnx2_init_one - PCI probe entry point for the bnx2 driver.
 *
 * Allocates the net_device, initializes the board via bnx2_init_board(),
 * wires up the (pre-net_device_ops) netdev method pointers, registers the
 * netdev, and prints a one-line banner identifying the chip and bus.
 *
 * NOTE(review): several statements of this function (local declarations,
 * error-path returns, closing brace) appear to be elided in this copy of
 * the file; the comments below describe only what is visible.
 */
5923 static int __devinit
5924 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Print the driver version banner only once, on the first probed device. */
5926 static int version_printed = 0;
5927 struct net_device *dev = NULL;
5931 if (version_printed++ == 0)
5932 printk(KERN_INFO "%s", version);
5934 /* dev zeroed in init_etherdev */
5935 dev = alloc_etherdev(sizeof(*bp));
/* Map BARs, reset the chip, read MAC/bus/NVRAM info into the private bp. */
5940 rc = bnx2_init_board(pdev, dev);
/* Hook up the classic netdev entry points implemented earlier in the file. */
5946 dev->open = bnx2_open;
5947 dev->hard_start_xmit = bnx2_start_xmit;
5948 dev->stop = bnx2_close;
5949 dev->get_stats = bnx2_get_stats;
5950 dev->set_multicast_list = bnx2_set_rx_mode;
5951 dev->do_ioctl = bnx2_ioctl;
5952 dev->set_mac_address = bnx2_change_mac_addr;
5953 dev->change_mtu = bnx2_change_mtu;
5954 dev->tx_timeout = bnx2_tx_timeout;
5955 dev->watchdog_timeo = TX_TIMEOUT;
5957 dev->vlan_rx_register = bnx2_vlan_rx_register;
5958 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5960 dev->poll = bnx2_poll;
5961 dev->ethtool_ops = &bnx2_ethtool_ops;
5964 bp = netdev_priv(dev);
/* netpoll support is optional; only wired up when the kernel provides it. */
5966 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5967 dev->poll_controller = poll_bnx2;
/* Registration failure: undo everything bnx2_init_board() set up. */
5970 if ((rc = register_netdev(dev))) {
5971 dev_err(&pdev->dev, "Cannot register net device\n");
5973 iounmap(bp->regview);
5974 pci_release_regions(pdev);
5975 pci_disable_device(pdev);
5976 pci_set_drvdata(pdev, NULL);
5981 pci_set_drvdata(pdev, dev);
/* Publish the permanent MAC address read from shared memory during init. */
5983 memcpy(dev->dev_addr, bp->mac_addr, 6);
5984 memcpy(dev->perm_addr, bp->mac_addr, 6);
/* NOTE(review): trailing comma here is the comma operator, not a typo that
 * breaks the build — but it should be a ';' for clarity. */
5985 bp->name = board_info[ent->driver_data].name,
/* One-line summary: board name, silicon rev (e.g. "A2"), bus type/width/speed. */
5986 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5990 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5991 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5992 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5993 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5998 printk("node addr ");
5999 for (i = 0; i < 6; i++)
6000 printk("%2.2x", dev->dev_addr[i]);
/* Advertise offload capabilities: scatter-gather, 64-bit DMA when enabled,
 * IP checksum offload, VLAN acceleration and TSO. */
6003 dev->features |= NETIF_F_SG;
6004 if (bp->flags & USING_DAC_FLAG)
6005 dev->features |= NETIF_F_HIGHDMA;
6006 dev->features |= NETIF_F_IP_CSUM;
6008 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6011 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
/* Link state is unknown until the first PHY poll; report "no carrier". */
6014 netif_carrier_off(bp->dev);
/*
 * bnx2_remove_one - PCI removal hook; mirrors bnx2_init_one() in reverse.
 *
 * Quiesces any deferred driver work, unregisters the netdev, then releases
 * the MMIO mapping, the PCI regions and the device itself.
 */
6019 static void __devexit
6020 bnx2_remove_one(struct pci_dev *pdev)
6022 struct net_device *dev = pci_get_drvdata(pdev);
6023 struct bnx2 *bp = netdev_priv(dev);
/* Wait for any work item this driver queued (elsewhere in the file) to
 * finish before tearing the device down. */
6025 flush_scheduled_work();
6027 unregister_netdev(dev);
/* Release resources in the opposite order they were acquired during probe. */
6030 iounmap(bp->regview);
6033 pci_release_regions(pdev);
6034 pci_disable_device(pdev);
6035 pci_set_drvdata(pdev, NULL);
/*
 * bnx2_suspend - PM suspend hook.
 *
 * Stops traffic, detaches the netdev from the stack, cancels the driver
 * timer, tells the firmware to bring the chip down with a WOL-appropriate
 * reset code, then enters the PCI power state chosen by the PM core.
 */
6039 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6041 struct net_device *dev = pci_get_drvdata(pdev);
6042 struct bnx2 *bp = netdev_priv(dev);
/* Nothing to quiesce if the interface was never brought up. */
6045 if (!netif_running(dev))
6048 flush_scheduled_work();
6049 bnx2_netif_stop(bp);
6050 netif_device_detach(dev);
6051 del_timer_sync(&bp->timer);
/* Pick the firmware reset code based on WOL capability/configuration.
 * NOTE(review): the else-if conditions between the three assignments are
 * elided in this copy — presumably "else if (bp->wol)" selects
 * SUSPEND_WOL; confirm against the full source. */
6052 if (bp->flags & NO_WOL_FLAG)
6053 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6055 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6057 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6058 bnx2_reset_chip(bp, reset_code);
6060 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
/*
 * bnx2_resume - PM resume hook; inverse of bnx2_suspend().
 *
 * Restores full power (D0), reattaches the netdev and restarts the NIC.
 */
6065 bnx2_resume(struct pci_dev *pdev)
6067 struct net_device *dev = pci_get_drvdata(pdev);
6068 struct bnx2 *bp = netdev_priv(dev);
/* If the interface was down at suspend time there is nothing to restart. */
6070 if (!netif_running(dev))
6073 bnx2_set_power_state(bp, PCI_D0);
6074 netif_device_attach(dev);
6076 bnx2_netif_start(bp);
/* PCI driver glue: binds the probe/remove/PM callbacks above to the device
 * IDs listed in bnx2_pci_tbl (defined earlier in the file). */
6080 static struct pci_driver bnx2_pci_driver = {
6081 .name = DRV_MODULE_NAME,
6082 .id_table = bnx2_pci_tbl,
6083 .probe = bnx2_init_one,
6084 .remove = __devexit_p(bnx2_remove_one),
6085 .suspend = bnx2_suspend,
6086 .resume = bnx2_resume,
/* Module entry point: hand the driver over to the PCI core, which will
 * call bnx2_init_one() for each matching device. */
6089 static int __init bnx2_init(void)
6091 return pci_register_driver(&bnx2_pci_driver);
/* Module exit point: unbind from all devices (invoking bnx2_remove_one())
 * and unregister the driver from the PCI core. */
6094 static void __exit bnx2_cleanup(void)
6096 pci_unregister_driver(&bnx2_pci_driver);
/* Register the module load/unload entry points with the kernel. */
6099 module_init(bnx2_init);
6100 module_exit(bnx2_cleanup);