1 /* bnx2x.c: Broadcom Everest network driver.
3 * Copyright (c) 2007 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Eliezer Tamir <eliezert@broadcom.com>
10 * Based on code from Michael Chan's bnx2 driver
11 * UDP CSUM errata workaround by Arik Gendelman
12 * Slowpath rework by Vladislav Zolotarov
13 * Statistics and Link management by Yitchak Gertner
17 /* define this to make the driver freeze on error
18 * to allow getting debug info
19 * (you will need to reboot afterwards)
21 /*#define BNX2X_STOP_ON_ERROR*/
23 #include <linux/module.h>
24 #include <linux/moduleparam.h>
25 #include <linux/kernel.h>
26 #include <linux/device.h> /* for dev_info() */
27 #include <linux/timer.h>
28 #include <linux/errno.h>
29 #include <linux/ioport.h>
30 #include <linux/slab.h>
31 #include <linux/vmalloc.h>
32 #include <linux/interrupt.h>
33 #include <linux/pci.h>
34 #include <linux/init.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/dma-mapping.h>
39 #include <linux/bitops.h>
40 #include <linux/irq.h>
41 #include <linux/delay.h>
42 #include <asm/byteorder.h>
43 #include <linux/time.h>
44 #include <linux/ethtool.h>
45 #include <linux/mii.h>
46 #ifdef NETIF_F_HW_VLAN_TX
47 #include <linux/if_vlan.h>
52 #include <net/checksum.h>
53 #include <linux/workqueue.h>
54 #include <linux/crc32.h>
55 #include <linux/prefetch.h>
56 #include <linux/zlib.h>
57 #include <linux/version.h>
60 #include "bnx2x_reg.h"
61 #include "bnx2x_fw_defs.h"
62 #include "bnx2x_hsi.h"
64 #include "bnx2x_init.h"
66 #define DRV_MODULE_VERSION "0.40.15"
67 #define DRV_MODULE_RELDATE "$DateTime: 2007/11/15 07:28:37 $"
68 #define BNX2X_BC_VER 0x040009
70 /* Time in jiffies before concluding the transmitter is hung. */
71 #define TX_TIMEOUT (5*HZ)
/* One-shot banner printed at driver load; __devinitdata lets the
 * kernel discard it after device initialization. */
73 static char version[] __devinitdata =
74 "Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
75 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
/* Module metadata exported to modinfo. */
77 MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_INFO(cvs_version, "$Revision: #356 $");
/* Module parameters (plain ints, permission 0: not visible in sysfs).
 * NOTE(review): the backing variable definitions and the
 * module_param() line for "nomcp" are not visible in this excerpt --
 * confirm against the full file. */
90 module_param(use_inta, int, 0);
91 module_param(poll, int, 0);
92 module_param(onefunc, int, 0);
93 module_param(debug, int, 0);
94 MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
95 MODULE_PARM_DESC(poll, "use polling (for debug)");
96 MODULE_PARM_DESC(onefunc, "enable only first function");
97 MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
98 MODULE_PARM_DESC(debug, "default debug msglevel");
/* Optional multi-queue mode: one RX/TX queue per CPU. */
101 module_param(use_multi, int, 0);
102 MODULE_PARM_DESC(use_multi, "use per-CPU queues");
/* Board identification: enum index -> human-readable name, plus the
 * PCI device id table used for driver binding.
 * NOTE(review): the enum body and the board_info struct declaration
 * lines are missing from this excerpt. */
105 enum bnx2x_board_type {
109 /* indexed by board_t, above */
112 } board_info[] __devinitdata = {
113 { "Broadcom NetXtreme II BCM57710 XGb" }
116 static const struct pci_device_id bnx2x_pci_tbl[] = {
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
122 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
124 /****************************************************************************
125 * General service functions
126 ****************************************************************************/
129 * locking is done by mcp
/* Indirect GRC register write via the PCI config-space address/data
 * window.  Afterwards the window is parked on the vendor-id offset so
 * unrelated config cycles cannot hit the GRC.  Locking is done by the
 * MCP (see comment above). */
131 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
133 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
134 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
135 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
136 PCICFG_VENDOR_ID_OFFSET);
/* Indirect GRC register read, mirror of bnx2x_reg_wr_ind().
 * NOTE(review): the local declaration of 'val' and the return
 * statement are missing from this excerpt. */
140 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
144 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
145 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
146 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
147 PCICFG_VENDOR_ID_OFFSET);
/* "GO" doorbell register for each of the 16 DMAE command slots,
 * indexed by slot number (see bnx2x_post_dmae()). */
153 static const u32 dmae_reg_go_c[] = {
154 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
155 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
156 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
157 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
160 /* copy command into DMAE command memory and set DMAE command go */
/* Copy @dmae word-by-word into the DMAE command memory slot @idx,
 * then ring that slot's GO register to start the transfer. */
161 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
168 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
169 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
171 /* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
172 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
174 REG_WR(bp, dmae_reg_go_c[idx], 1);
/* DMA @len32 dwords from host memory @dma_addr to GRC address
 * @dst_addr using the DMAE engine, then busy-wait on the slowpath
 * write-back completion word.
 * NOTE(review): several lines (port declaration, #ifdef __BIG_ENDIAN
 * arms, timeout loop body) are missing from this excerpt. */
177 static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
178 u32 dst_addr, u32 len32)
180 struct dmae_command *dmae = &bp->dmae;
182 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
185 memset(dmae, 0, sizeof(struct dmae_command));
187 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
188 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
193 DMAE_CMD_ENDIANITY_DW_SWAP |
195 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
196 dmae->src_addr_lo = U64_LO(dma_addr);
197 dmae->src_addr_hi = U64_HI(dma_addr);
/* GRC destination is expressed in dwords, hence the >> 2 */
198 dmae->dst_addr_lo = dst_addr >> 2;
199 dmae->dst_addr_hi = 0;
/* completion: DMAE writes comp_val to wb_comp when done */
201 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
202 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
203 dmae->comp_val = BNX2X_WB_COMP_VAL;
206 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
207 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
208 "dst_addr [%x:%08x (%08x)]\n"
209 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
210 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
211 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
212 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
215 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
216 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
217 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* one DMAE slot per port: slots 0 and 8 */
222 bnx2x_post_dmae(bp, dmae, port * 8);
225 /* adjust timeout for emulation/FPGA */
226 if (CHIP_REV_IS_SLOW(bp))
/* poll the write-back word until the engine signals completion */
228 while (*wb_comp != BNX2X_WB_COMP_VAL) {
229 /* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */
232 BNX2X_ERR("dmae timeout!\n");
/* DMA @len32 dwords from GRC address @src_addr into the slowpath
 * wb_data buffer -- mirror of bnx2x_write_dmae().
 * NOTE(review): several lines (port declaration, #ifdef endianness
 * arms, timeout loop body) are missing from this excerpt. */
240 static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
242 struct dmae_command *dmae = &bp->dmae;
244 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
/* clear destination so stale data is never mistaken for a result */
247 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
248 memset(dmae, 0, sizeof(struct dmae_command));
250 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
251 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
252 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
254 DMAE_CMD_ENDIANITY_B_DW_SWAP |
256 DMAE_CMD_ENDIANITY_DW_SWAP |
258 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
/* GRC source is expressed in dwords, hence the >> 2 */
259 dmae->src_addr_lo = src_addr >> 2;
260 dmae->src_addr_hi = 0;
261 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
262 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
264 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
265 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
266 dmae->comp_val = BNX2X_WB_COMP_VAL;
269 DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
270 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
271 "dst_addr [%x:%08x (%08x)]\n"
272 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
273 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
274 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
275 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
280 bnx2x_post_dmae(bp, dmae, port * 8);
283 while (*wb_comp != BNX2X_WB_COMP_VAL) {
286 BNX2X_ERR("dmae timeout!\n");
292 DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
293 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
294 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
/* Scan the assert lists of the four storm processors (X, T, C, U --
 * per the storm[] string) and print every valid assert entry.
 * NOTE(review): local declarations, the intmem_base[] initializer and
 * the return path are missing from this excerpt; the offsets appear
 * to be XSTORM macros rebased per storm via intmem_base[i]. */
299 static int bnx2x_mc_assert(struct bnx2x *bp)
304 const char storm[] = {"XTCU"};
305 const u32 intmem_base[] = {
312 /* Go through all instances of all SEMIs */
313 for (i = 0; i < 4; i++) {
314 last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
316 BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
319 /* print the asserts */
320 for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
321 u32 row0, row1, row2, row3;
/* each assert entry is four consecutive dwords */
323 row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
325 row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
327 row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
329 row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
/* an "invalid opcode" in row0 marks an unused entry */
332 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
333 BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
334 " 0x%08x 0x%08x 0x%08x 0x%08x\n",
335 storm[i], j, row3, row2, row1, row0);
/* Dump the MCP firmware's circular trace buffer from scratchpad RAM.
 * The mark word at 0xf104 splits the buffer: the range [mark, 0xF900]
 * is printed first (older half), then [0xF108, mark] (newer half).
 * htonl() converts the scratchpad words to the byte order the
 * firmware wrote its ASCII text in.
 * NOTE(review): the declarations of mark/offset/word/data[] are
 * missing from this excerpt. */
345 static void bnx2x_fw_dump(struct bnx2x *bp)
351 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
352 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
354 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
355 for (word = 0; word < 8; word++)
356 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
359 printk(KERN_ERR PFX "%s", (char *)data);
361 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
362 for (word = 0; word < 8; word++)
363 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
366 printk(KERN_ERR PFX "%s", (char *)data);
368 printk("\n" KERN_ERR PFX "end of fw dump\n");
/* Emergency state dump: for every queue print the ring indices and a
 * window of descriptors around each consumer, then the slowpath
 * indices, and finally disable statistics so the dump stays stable.
 * NOTE(review): local declarations (i, j, start, end) and some lines
 * between sections are missing from this excerpt. */
371 static void bnx2x_panic_dump(struct bnx2x *bp)
376 BNX2X_ERR("begin crash dump -----------------\n");
378 for_each_queue(bp, i) {
379 struct bnx2x_fastpath *fp = &bp->fp[i];
380 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
382 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
383 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
384 " *rx_cons_sb(%x) rx_comp_prod(%x)"
385 " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
387 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
388 fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
389 fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
390 fp->fp_u_idx, hw_prods->packets_prod,
/* TX packet ring: 10 entries before the SB consumer, 245 after */
393 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
394 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
395 for (j = start; j < end; j++) {
396 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
398 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
399 sw_bd->skb, sw_bd->first_bd);
/* TX BD ring window around the driver's BD consumer */
402 start = TX_BD(fp->tx_bd_cons - 10);
403 end = TX_BD(fp->tx_bd_cons + 254);
404 for (j = start; j < end; j++) {
405 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
407 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
408 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
/* RX BD ring window around the SB consumer */
411 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
412 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
413 for (j = start; j < end; j++) {
414 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
415 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
417 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
418 j, rx_bd[0], rx_bd[1], sw_bd->skb);
/* RX completion queue window around the completion consumer */
421 start = RCQ_BD(fp->rx_comp_cons - 10);
422 end = RCQ_BD(fp->rx_comp_cons + 503);
423 for (j = start; j < end; j++) {
424 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
426 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
427 j, cqe[0], cqe[1], cqe[2], cqe[3]);
431 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)"
432 " def_x_idx(%u) def_att_idx(%u) attn_state(%u)"
433 " spq_prod_idx(%u)\n",
434 bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
435 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
439 BNX2X_ERR("end crash dump -----------------\n");
/* freeze statistics so the post-mortem state stops changing */
441 bp->stats_state = STATS_STATE_DISABLE;
442 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
/* Enable host-coalescing interrupts for this port: MSI-X mode clears
 * single-ISR and sets MSI/MSI-X + attention bits; INTA mode does the
 * opposite.  NOTE(review): the port declaration and the if/else lines
 * around the two val-update branches are missing from this excerpt. */
445 static void bnx2x_enable_int(struct bnx2x *bp)
448 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
449 u32 val = REG_RD(bp, addr);
450 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
/* MSI-X branch */
453 val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
454 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
455 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
/* INTA branch */
457 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
458 HC_CONFIG_0_REG_INT_LINE_EN_0 |
459 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
460 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
463 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) msi %d\n",
464 val, port, addr, msix);
466 REG_WR(bp, addr, val);
/* Mask all HC interrupt sources for this port, then read back to
 * flush the posted write and verify the value stuck. */
469 static void bnx2x_disable_int(struct bnx2x *bp)
472 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
473 u32 val = REG_RD(bp, addr);
475 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
476 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
477 HC_CONFIG_0_REG_INT_LINE_EN_0 |
478 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
480 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
483 REG_WR(bp, addr, val);
/* read-back both flushes the write and sanity-checks the IGU */
484 if (REG_RD(bp, addr) != val)
485 BNX2X_ERR("BUG! proper val not read from IGU!\n");
/* Disable interrupts and wait for all in-flight handlers to finish:
 * raise intr_sem (soft-gates the ISR), mask the HW, synchronize every
 * IRQ (per-queue MSI-X vectors + slowpath vector, or the single INTA
 * line), then cancel the slowpath work item.
 * NOTE(review): the if (msix)/else lines selecting between the two
 * synchronize paths are missing from this excerpt. */
488 static void bnx2x_disable_int_sync(struct bnx2x *bp)
491 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
494 atomic_inc(&bp->intr_sem);
495 /* prevent the HW from sending interrupts */
496 bnx2x_disable_int(bp);
498 /* make sure all ISRs are done */
500 for_each_queue(bp, i)
501 synchronize_irq(bp->msix_table[i].vector);
503 /* one more for the Slow Path IRQ */
504 synchronize_irq(bp->msix_table[i].vector);
506 synchronize_irq(bp->pdev->irq);
508 /* make sure sp_task is not running */
509 cancel_work_sync(&bp->sp_task);
516 * general service functions
/* Acknowledge status block @id/@storm to the IGU: packs the SB id,
 * storm id, update flag and interrupt-mode op into one register and
 * writes it together with the new index to the per-port ack address. */
519 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
520 u8 storm, u16 index, u8 op, u8 update)
522 u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
523 struct igu_ack_register igu_ack;
525 igu_ack.status_block_index = index;
526 igu_ack.sb_id_and_flags =
527 ((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
528 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
529 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
530 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
532 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
533 (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
/* the struct is written as one u32 to the IGU mailbox */
534 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
/* Refresh the fastpath's cached C-storm and U-storm status block
 * indices from the chip-written status block.
 * NOTE(review): the return value (apparently a changed/rc flag) and
 * its updates inside the two branches are missing from this excerpt. */
537 static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
539 struct host_status_block *fpsb = fp->status_blk;
542 barrier(); /* status block is written to by the chip */
543 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
544 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
547 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
548 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
/* Poll-style check: does this fastpath have RX completions or TX
 * completions pending?  A SB consumer sitting exactly on the last
 * descriptor of a page is adjusted first (page-boundary skip).
 * NOTE(review): the adjustment statement after the first if and the
 * return statements are missing from this excerpt. */
554 static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
556 u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
558 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
561 if ((rx_cons_sb != fp->rx_comp_cons) ||
562 (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
/* Read-and-ack the IGU SIMD interrupt mask for this port; the read
 * itself acknowledges the interrupt and returns the pending bits.
 * NOTE(review): the #ifdef IGU_DEBUG guard around the debug lines and
 * the return statement are missing from this excerpt. */
568 static u16 bnx2x_ack_int(struct bnx2x *bp)
570 u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
571 u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);
573 /* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
574 result, BAR_IGU_INTMEM + igu_addr); */
577 #warning IGU_DEBUG active
579 BNX2X_ERR("read %x from IGU\n", result);
580 REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
588 * fast path service functions
591 /* free skb in the packet ring at pos idx
592 * return idx of last bd freed
/* Unmap and free the TX packet at ring position @idx: unmap the first
 * (header) BD, skip the parse BD and optional TSO split-header BD
 * (neither has a DMA mapping), unmap every fragment BD, then release
 * the skb.  Returns the index of the last BD freed.
 * NOTE(review): local declarations (nbd, i), the frag loop header and
 * the dev_kfree_skb()/return lines are missing from this excerpt. */
594 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
597 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
598 struct eth_tx_bd *tx_bd;
599 struct sk_buff *skb = tx_buf->skb;
600 u16 bd_idx = tx_buf->first_bd;
603 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
/* unmap first bd (the linear part of the skb) */
607 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
608 tx_bd = &fp->tx_desc_ring[bd_idx];
609 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
610 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
/* nbd counts all BDs of the packet; -1 excludes the one just done */
612 nbd = le16_to_cpu(tx_bd->nbd) - 1;
613 #ifdef BNX2X_STOP_ON_ERROR
614 if (nbd > (MAX_SKB_FRAGS + 2)) {
615 BNX2X_ERR("bad nbd!\n");
620 /* Skip a parse bd and the TSO split header bd
621 since they have no mapping */
623 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
625 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
626 ETH_TX_BD_FLAGS_TCP_CSUM |
627 ETH_TX_BD_FLAGS_SW_LSO)) {
629 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
630 tx_bd = &fp->tx_desc_ring[bd_idx];
631 /* is this a TSO split header bd? */
632 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
634 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* unmap the remaining fragment BDs */
641 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
642 tx_bd = &fp->tx_desc_ring[bd_idx];
643 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
644 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
646 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
/* clear the software ring entry */
652 tx_buf->first_bd = 0;
/* Number of TX BDs still free on this fastpath.  The per-page "next"
 * BDs (NUM_TX_BD - NUM_TX_RINGS of them) are excluded from the usable
 * count; the prod/cons page-count terms compensate for crossed page
 * boundaries.  NOTE(review): local declarations, the barrier() hinted
 * at by the "can change" comment, and an #else arm appear to be
 * missing from this excerpt. */
658 static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
664 /* Tell compiler that prod and cons can change */
666 prod = fp->tx_bd_prod;
667 cons = fp->tx_bd_cons;
669 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
670 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
673 /* used = prod - cons - prod/size + cons/size */
674 used -= NUM_TX_BD - NUM_TX_RINGS;
677 BUG_TRAP(used <= fp->bp->tx_ring_size);
678 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
680 return (fp->bp->tx_ring_size - used);
/* TX completion handler: walk from the software consumer to the
 * hardware consumer (from the status block), freeing each completed
 * packet, then wake the netdev queue if it was stopped and enough BDs
 * are free again.
 * NOTE(review): local declarations and the smp_mb() implied by the
 * memory-barrier comment are missing from this excerpt. */
683 static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
685 struct bnx2x *bp = fp->bp;
686 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
689 #ifdef BNX2X_STOP_ON_ERROR
690 if (unlikely(bp->panic))
694 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
695 sw_cons = fp->tx_pkt_cons;
697 while (sw_cons != hw_cons) {
700 pkt_cons = TX_BD(sw_cons);
702 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
704 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
705 hw_cons, sw_cons, pkt_cons);
707 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
709 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
712 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
720 fp->tx_pkt_cons = sw_cons;
721 fp->tx_bd_cons = bd_cons;
723 /* Need to make the tx_cons update visible to start_xmit()
724 * before checking for netif_queue_stopped(). Without the
725 * memory barrier, there is a small possibility that start_xmit()
726 * will miss it and cause the queue to be stopped forever.
730 /* TBD need a thresh? */
731 if (unlikely(netif_queue_stopped(bp->dev))) {
/* take the tx lock and re-check under it to avoid racing xmit */
733 netif_tx_lock(bp->dev);
735 if (netif_queue_stopped(bp->dev) &&
736 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
737 netif_wake_queue(bp->dev);
739 netif_tx_unlock(bp->dev);
/* Handle a slowpath (ramrod completion) CQE seen on a fastpath ring:
 * advance the per-fastpath state machine for multi-queue setup/halt
 * ramrods, or the global bp->state machine for port-level ramrods.
 * NOTE(review): the if/else selecting between the two switch blocks
 * (apparently on multi-mode vs. port commands) and some lines inside
 * cases are missing from this excerpt. */
744 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
745 union eth_rx_cqe *rr_cqe)
747 struct bnx2x *bp = fp->bp;
748 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
749 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
751 DP(NETIF_MSG_RX_STATUS,
752 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
753 fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);
/* per-fastpath (multi-queue) ramrods */
758 switch (command | fp->state) {
759 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
760 BNX2X_FP_STATE_OPENING):
761 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
763 fp->state = BNX2X_FP_STATE_OPEN;
766 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
767 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
769 fp->state = BNX2X_FP_STATE_HALTED;
773 BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
776 mb(); /* force bnx2x_wait_ramrod to see the change */
/* port-level ramrods driving the global state machine */
780 switch (command | bp->state) {
781 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
782 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
783 bp->state = BNX2X_STATE_OPEN;
786 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
787 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
788 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
789 fp->state = BNX2X_FP_STATE_HALTED;
792 case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
793 DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
794 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
797 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
798 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
799 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
802 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
803 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
807 BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
811 mb(); /* force bnx2x_wait_ramrod to see the change */
/* Allocate a fresh skb for RX ring slot @index, DMA-map it and fill
 * in the hardware BD address.  Returns 0 on success (per the caller
 * in bnx2x_rx_int); error returns and the 'mapping' declaration are
 * missing from this excerpt -- NOTE(review). */
814 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
815 struct bnx2x_fastpath *fp, u16 index)
818 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
819 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
822 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
823 if (unlikely(skb == NULL))
826 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
828 if (unlikely(dma_mapping_error(mapping))) {
/* record the mapping so it can be unmapped at completion time */
835 pci_unmap_addr_set(rx_buf, mapping, mapping);
837 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
838 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
843 /* note that we are not allocating a new skb,
844 * we are just moving one from cons to prod
845 * we are not creating a new mapping,
846 * so there is no need to check for dma_mapping_error().
/* Recycle an RX buffer: move the skb and its existing DMA mapping
 * from the consumer slot to the producer slot (no new allocation or
 * mapping -- see comment above).  Only the first bytes that the CPU
 * may have touched are synced back to the device.
 * NOTE(review): the lines copying cons_bd -> prod_bd addresses appear
 * to be missing from this excerpt. */
848 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
849 struct sk_buff *skb, u16 cons, u16 prod)
851 struct bnx2x *bp = fp->bp;
852 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
853 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
854 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
855 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
857 pci_dma_sync_single_for_device(bp->pdev,
858 pci_unmap_addr(cons_rx_buf, mapping),
859 bp->rx_offset + RX_COPY_THRESH,
862 prod_rx_buf->skb = cons_rx_buf->skb;
863 pci_unmap_addr_set(prod_rx_buf, mapping,
864 pci_unmap_addr(cons_rx_buf, mapping));
/* RX completion-queue processing (NAPI poll body): consume up to
 * @budget CQEs.  Slowpath CQEs are dispatched to bnx2x_sp_event();
 * fastpath CQEs either recycle the buffer (error / small-copy path)
 * or hand the skb up the stack after csum/VLAN handling.  Finally the
 * new ring indices are written back to the chip via TSTORM.
 * NOTE(review): several lines (skb declaration, rx_pkt counter init,
 * goto targets, #ifdef BCM_VLAN arms, error-path statistics) are
 * missing from this excerpt. */
868 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
870 struct bnx2x *bp = fp->bp;
871 u16 bd_cons, bd_prod, comp_ring_cons;
872 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
875 #ifdef BNX2X_STOP_ON_ERROR
876 if (unlikely(bp->panic))
/* hw consumer sitting on a page boundary needs adjustment */
880 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
881 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
884 bd_cons = fp->rx_bd_cons;
885 bd_prod = fp->rx_bd_prod;
886 sw_comp_cons = fp->rx_comp_cons;
887 sw_comp_prod = fp->rx_comp_prod;
889 /* Memory barrier necessary as speculative reads of the rx
890 * buffer can be ahead of the index in the status block
894 DP(NETIF_MSG_RX_STATUS,
895 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
896 fp->index, hw_comp_cons, sw_comp_cons);
898 while (sw_comp_cons != hw_comp_cons) {
899 unsigned int len, pad;
900 struct sw_rx_bd *rx_buf;
902 union eth_rx_cqe *cqe;
904 comp_ring_cons = RCQ_BD(sw_comp_cons);
905 bd_prod = RX_BD(bd_prod);
906 bd_cons = RX_BD(bd_cons);
908 cqe = &fp->rx_comp_ring[comp_ring_cons];
910 DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
911 " comp_ring (%u) bd_ring (%u,%u)\n",
912 hw_comp_cons, sw_comp_cons,
913 comp_ring_cons, bd_prod, bd_cons);
914 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
915 " queue %x vlan %x len %x\n",
916 cqe->fast_path_cqe.type,
917 cqe->fast_path_cqe.error_type_flags,
918 cqe->fast_path_cqe.status_flags,
919 cqe->fast_path_cqe.rss_hash_result,
920 cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);
922 /* is this a slowpath msg? */
923 if (unlikely(cqe->fast_path_cqe.type)) {
924 bnx2x_sp_event(fp, cqe);
927 /* this is an rx packet */
929 rx_buf = &fp->rx_buf_ring[bd_cons];
932 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
933 pad = cqe->fast_path_cqe.placement_offset;
935 pci_dma_sync_single_for_device(bp->pdev,
936 pci_unmap_addr(rx_buf, mapping),
937 pad + RX_COPY_THRESH,
940 prefetch(((char *)(skb)) + 128);
942 /* is this an error packet? */
943 if (unlikely(cqe->fast_path_cqe.error_type_flags &
944 ETH_RX_ERROR_FALGS)) {
945 /* do we sometimes forward error packets anyway? */
947 "ERROR flags(%u) Rx packet(%u)\n",
948 cqe->fast_path_cqe.error_type_flags,
950 /* TBD make sure MC counts this as a drop */
954 /* Since we don't have a jumbo ring
955 * copy small packets if mtu > 1500
957 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
958 (len <= RX_COPY_THRESH)) {
959 struct sk_buff *new_skb;
961 new_skb = netdev_alloc_skb(bp->dev,
963 if (new_skb == NULL) {
965 "ERROR packet dropped "
966 "because of alloc failure\n");
967 /* TBD count this as a drop? */
/* copy into the new skb, recycle the old buffer in place */
972 skb_copy_from_linear_data_offset(skb, pad,
973 new_skb->data + pad, len);
974 skb_reserve(new_skb, pad);
975 skb_put(new_skb, len);
977 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
/* full-size path: give the skb up, refill the producer slot */
981 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
982 pci_unmap_single(bp->pdev,
983 pci_unmap_addr(rx_buf, mapping),
986 skb_reserve(skb, pad);
/* refill failed: drop this packet and reuse its buffer */
991 "ERROR packet dropped because "
992 "of alloc failure\n");
994 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
998 skb->protocol = eth_type_trans(skb, bp->dev);
1000 skb->ip_summed = CHECKSUM_NONE;
1001 if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
1002 skb->ip_summed = CHECKSUM_UNNECESSARY;
1004 /* TBD do we pass bad csum packets in promisc */
/* VLAN acceleration: strip the tag if the parser saw one */
1008 if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
1009 & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
1010 && (bp->vlgrp != NULL))
1011 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1012 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1015 netif_receive_skb(skb);
1017 bp->dev->last_rx = jiffies;
1022 bd_cons = NEXT_RX_IDX(bd_cons);
1023 bd_prod = NEXT_RX_IDX(bd_prod);
1025 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1026 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1029 if ((rx_pkt == budget))
1033 fp->rx_bd_cons = bd_cons;
1034 fp->rx_bd_prod = bd_prod;
1035 fp->rx_comp_cons = sw_comp_cons;
1036 fp->rx_comp_prod = sw_comp_prod;
/* tell the chip how far we got on the completion queue */
1038 REG_WR(bp, BAR_TSTRORM_INTMEM +
1039 TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);
1041 mmiowb(); /* keep prod updates ordered */
1043 fp->rx_pkt += rx_pkt;
/* MSI-X fastpath ISR: ack-disable the queue's status block, prefetch
 * the hot cachelines, and hand the work to NAPI.
 * NOTE(review): the IRQ_HANDLED return lines are missing from this
 * excerpt. */
1049 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1051 struct bnx2x_fastpath *fp = fp_cookie;
1052 struct bnx2x *bp = fp->bp;
1053 struct net_device *dev = bp->dev;
1054 int index = fp->index;
1056 DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
1057 bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1059 #ifdef BNX2X_STOP_ON_ERROR
1060 if (unlikely(bp->panic))
1064 prefetch(fp->rx_cons_sb);
1065 prefetch(fp->tx_cons_sb);
1066 prefetch(&fp->status_blk->c_status_block.status_block_index);
1067 prefetch(&fp->status_blk->u_status_block.status_block_index);
1069 netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
/* Single/INTA ISR: read-and-ack the IGU status word; bit 0 is the
 * slowpath (scheduled as sp_task), other bits schedule NAPI for
 * queue 0.  Returns IRQ_NONE for status==0 so shared-line IRQs work.
 * NOTE(review): the return statements, the status-bit test guarding
 * the fastpath branch, and the status-masking lines are missing from
 * this excerpt. */
1073 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1075 struct net_device *dev = dev_instance;
1076 struct bnx2x *bp = netdev_priv(dev);
1077 u16 status = bnx2x_ack_int(bp);
1079 if (unlikely(status == 0)) {
1080 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1084 DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);
1086 #ifdef BNX2X_STOP_ON_ERROR
1087 if (unlikely(bp->panic))
1091 /* Return here if interrupt is shared and is disabled */
1092 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1093 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1098 struct bnx2x_fastpath *fp = &bp->fp[0];
1100 prefetch(fp->rx_cons_sb);
1101 prefetch(fp->tx_cons_sb);
1102 prefetch(&fp->status_blk->c_status_block.status_block_index);
1103 prefetch(&fp->status_blk->u_status_block.status_block_index);
1105 netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));
/* bit 0: slowpath events, handled in process context */
1112 if (unlikely(status & 0x1)) {
1114 schedule_work(&bp->sp_task);
1121 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
1127 /* end of fast path */
1132 * General service functions
/* Program the NIG LED registers for this port: select the LED mode
 * from shared HW config, set the blink rate, and on Ax silicon below
 * 10G switch to the alternate (override) LED scheme. */
1135 static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
1137 int port = bp->port;
1139 NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
1140 ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
1141 SHARED_HW_CFG_LED_MODE_SHIFT));
1142 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
1144 /* Set blinking rate to ~15.9Hz */
1145 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
1146 LED_BLINK_RATE_VAL);
1147 NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);
1149 /* On Ax chip versions for speeds less than 10G
1150 LED scheme is different */
1151 if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
1152 NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
1153 NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
1154 NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
/* Turn off the 10G LED and restore the default MAC1 LED mode. */
1158 static void bnx2x_leds_unset(struct bnx2x *bp)
1160 int port = bp->port;
1162 NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
1163 NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
/* Read-modify-write helper: set @bits in register @reg.
 * NOTE(review): the OR statement and return are missing from this
 * excerpt; return value presumably the new register value. */
1166 static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
1168 u32 val = REG_RD(bp, reg);
1171 REG_WR(bp, reg, val);
/* Read-modify-write helper: clear @bits in register @reg.
 * NOTE(review): the AND-NOT statement and return are missing from
 * this excerpt. */
1175 static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
1177 u32 val = REG_RD(bp, reg);
1180 REG_WR(bp, reg, val);
/* Clause-22 MDIO write to the PHY through the port's EMAC block.
 * Auto-poll is paused for the duration (when enabled), the command is
 * issued, and completion is polled up to 50 times.
 * NOTE(review): local declarations (tmp, i, rc), udelay() calls in
 * the poll loop, and the return statement are missing from this
 * excerpt. */
1184 static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
1188 int port = bp->port;
1189 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1191 /* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x val 0x%08x\n",
1192 bp->phy_addr, reg, val); */
/* pause HW auto-polling so our command owns the MDIO bus */
1194 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1196 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1197 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1198 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1199 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1203 tmp = ((bp->phy_addr << 21) | (reg << 16) |
1204 (val & EMAC_MDIO_COMM_DATA) |
1205 EMAC_MDIO_COMM_COMMAND_WRITE_22 |
1206 EMAC_MDIO_COMM_START_BUSY);
1207 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
/* poll for START_BUSY to clear */
1209 for (i = 0; i < 50; i++) {
1212 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1213 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1219 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1220 BNX2X_ERR("write phy register failed\n");
/* restore auto-polling if we disabled it */
1227 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1229 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1230 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1231 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
/* Clause-22 MDIO read from the PHY, mirror of bnx2x_mdio22_write();
 * result is returned via @ret_val.
 * NOTE(review): local declarations, udelay() calls, the *ret_val
 * assignment and the return statement are missing from this excerpt. */
1237 static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
1239 int port = bp->port;
1240 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
/* pause HW auto-polling so our command owns the MDIO bus */
1244 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1246 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1247 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1248 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1249 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1253 val = ((bp->phy_addr << 21) | (reg << 16) |
1254 EMAC_MDIO_COMM_COMMAND_READ_22 |
1255 EMAC_MDIO_COMM_START_BUSY);
1256 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
/* poll for START_BUSY to clear; data is in the low bits */
1258 for (i = 0; i < 50; i++) {
1261 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1262 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1263 val &= EMAC_MDIO_COMM_DATA;
1268 if (val & EMAC_MDIO_COMM_START_BUSY) {
1269 BNX2X_ERR("read phy register failed\n");
/* restore auto-polling if we disabled it */
1278 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1280 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1281 val |= EMAC_MDIO_MODE_AUTO_POLL;
1282 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
1285 /* DP(NETIF_MSG_HW, "phy_addr 0x%x reg 0x%x ret_val 0x%08x\n",
1286 bp->phy_addr, reg, *ret_val); */
/* Clause-45 MDIO write: pause auto-poll, switch the EMAC to clause-45
 * mode, issue an ADDRESS cycle (device @reg, register @addr) then a
 * WRITE cycle with @val, polling each for completion, and finally
 * restore clause-22 mode and auto-poll.
 * NOTE(review): local declarations, udelay() calls and return
 * statements are missing from this excerpt. */
1291 static int bnx2x_mdio45_write(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1295 int port = bp->port;
1296 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1298 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1300 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1301 tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
1302 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1303 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1307 /* set clause 45 mode */
1308 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1309 tmp |= EMAC_MDIO_MODE_CLAUSE_45;
1310 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
/* address cycle */
1313 tmp = ((bp->phy_addr << 21) | (reg << 16) | addr |
1314 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1315 EMAC_MDIO_COMM_START_BUSY);
1316 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1318 for (i = 0; i < 50; i++) {
1321 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1322 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1328 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1329 BNX2X_ERR("write phy register failed\n");
/* data cycle */
1334 tmp = ((bp->phy_addr << 21) | (reg << 16) | val |
1335 EMAC_MDIO_COMM_COMMAND_WRITE_45 |
1336 EMAC_MDIO_COMM_START_BUSY);
1337 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);
1339 for (i = 0; i < 50; i++) {
1342 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1343 if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
1349 if (tmp & EMAC_MDIO_COMM_START_BUSY) {
1350 BNX2X_ERR("write phy register failed\n");
1356 /* unset clause 45 mode */
1357 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1358 tmp &= ~EMAC_MDIO_MODE_CLAUSE_45;
1359 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
1361 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1363 tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1364 tmp |= EMAC_MDIO_MODE_AUTO_POLL;
1365 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
/* Read a clause-45 PHY register @reg (devad) / @addr via the EMAC MDIO
 * interface; the mirror of bnx2x_mdio45_write().  Two-step sequence:
 * ADDRESS command to latch the register, then READ_45 to fetch the data
 * (masked to EMAC_MDIO_COMM_DATA).  Auto-polling is suspended around
 * the access and restored afterwards.
 * NOTE(review): the result out-parameter and several interior lines
 * (udelay()s, break/return paths) are not visible in this excerpt.
 */
1371 static int bnx2x_mdio45_read(struct bnx2x *bp, u32 reg, u32 addr,
1374 int port = bp->port;
1375 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
/* pause hardware auto-polling so it cannot race this transaction */
1379 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1381 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1382 val &= ~EMAC_MDIO_MODE_AUTO_POLL;
1383 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
/* read back to flush the posted write */
1384 REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1388 /* set clause 45 mode */
1389 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1390 val |= EMAC_MDIO_MODE_CLAUSE_45;
1391 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
/* step 1: latch the register address */
1394 val = ((bp->phy_addr << 21) | (reg << 16) | addr |
1395 EMAC_MDIO_COMM_COMMAND_ADDRESS |
1396 EMAC_MDIO_COMM_START_BUSY);
1397 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
/* poll for BUSY to self-clear (bounded at 50 iterations) */
1399 for (i = 0; i < 50; i++) {
1402 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1403 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
1409 if (val & EMAC_MDIO_COMM_START_BUSY) {
1410 BNX2X_ERR("read phy register failed\n");
/* step 2: issue the clause-45 read of the latched register */
1416 val = ((bp->phy_addr << 21) | (reg << 16) |
1417 EMAC_MDIO_COMM_COMMAND_READ_45 |
1418 EMAC_MDIO_COMM_START_BUSY);
1419 EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);
1421 for (i = 0; i < 50; i++) {
1424 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
1425 if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
/* keep only the data field of the COMM register */
1426 val &= EMAC_MDIO_COMM_DATA;
1431 if (val & EMAC_MDIO_COMM_START_BUSY) {
1432 BNX2X_ERR("read phy register failed\n");
1441 /* unset clause 45 mode */
1442 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1443 val &= ~EMAC_MDIO_MODE_CLAUSE_45;
1444 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
/* restore auto-polling if it was enabled on entry */
1446 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
1448 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
1449 val |= EMAC_MDIO_MODE_AUTO_POLL;
1450 EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
/* "Verified" clause-45 write: write @val, read it back, and retry
 * (up to 10 attempts) until the read-back matches what was written.
 * Logs an error if all attempts fail.
 * NOTE(review): the comparison/early-exit lines between the read and
 * the error report are not visible in this excerpt.
 */
1456 static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 reg, u32 addr, u32 val)
1462 for (i = 0; i < 10; i++) {
1463 bnx2x_mdio45_write(bp, reg, addr, val);
1465 bnx2x_mdio45_read(bp, reg, addr, &rd_val);
1466 /* if the read value is not the same as the value we wrote,
1467 we should write it again */
1471 BNX2X_ERR("MDIO write in CL45 failed\n");
/* Resolve the negotiated pause (flow control) settings into
 * bp->flow_ctrl.  When autoneg of flow control completed on a
 * non-SGMII direct-XGXS link, the local and link-partner pause
 * advertisements are read over MDIO and combined into a 4-bit
 * pause_result ([3:2] = local ASYM/PAUSE, [1:0] = partner ASYM/PAUSE)
 * which is decoded per the IEEE 802.3 pause resolution table.
 * Otherwise the forced/requested mode from bp->req_flow_ctrl is used.
 */
1479 static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
1481 u32 ld_pause; /* local driver */
1482 u32 lp_pause; /* link partner */
1487 /* resolve from gp_status in case of AN complete and not sgmii */
1488 if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
1489 (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
1490 (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
1491 (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1493 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
1494 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1496 bnx2x_mdio22_read(bp,
1497 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
/* pack local bits into [3:2] and partner bits into [1:0] */
1499 pause_result = (ld_pause &
1500 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1501 pause_result |= (lp_pause &
1502 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1503 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
1505 switch (pause_result) { /* ASYM P ASYM P */
1506 case 0xb: /* 1 0 1 1 */
1507 bp->flow_ctrl = FLOW_CTRL_TX;
1510 case 0xe: /* 1 1 1 0 */
1511 bp->flow_ctrl = FLOW_CTRL_RX;
1514 case 0x5: /* 0 1 0 1 */
1515 case 0x7: /* 0 1 1 1 */
1516 case 0xd: /* 1 1 0 1 */
1517 case 0xf: /* 1 1 1 1 */
1518 bp->flow_ctrl = FLOW_CTRL_BOTH;
1525 } else { /* forced mode */
1526 switch (bp->req_flow_ctrl) {
1527 case FLOW_CTRL_AUTO:
/* AUTO without completed AN: choose by MTU (TX-only for jumbo) */
1528 if (bp->dev->mtu <= 4500)
1529 bp->flow_ctrl = FLOW_CTRL_BOTH;
1531 bp->flow_ctrl = FLOW_CTRL_TX;
1536 case FLOW_CTRL_BOTH:
1537 bp->flow_ctrl = bp->req_flow_ctrl;
1540 case FLOW_CTRL_NONE:
1545 DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
/* Decode the XGXS GP status register into the driver's link state:
 * bp->link_status (shared-memory format bits), bp->line_speed,
 * bp->duplex and (via bnx2x_flow_ctrl_resolve) bp->flow_ctrl.
 * On link-up the speed field of gp_status selects one of the
 * 10M..16G cases; on link-down duplex defaults to FULL.
 */
1548 static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
1550 bp->link_status = 0;
1552 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
1553 DP(NETIF_MSG_LINK, "link up\n");
1556 bp->link_status |= LINK_STATUS_LINK_UP;
1558 if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
1559 bp->duplex = DUPLEX_FULL;
1561 bp->duplex = DUPLEX_HALF;
/* pause settings depend on AN result, so resolve them here */
1563 bnx2x_flow_ctrl_resolve(bp, gp_status);
1565 switch (gp_status & GP_STATUS_SPEED_MASK) {
1567 bp->line_speed = SPEED_10;
1568 if (bp->duplex == DUPLEX_FULL)
1569 bp->link_status |= LINK_10TFD;
1571 bp->link_status |= LINK_10THD;
1574 case GP_STATUS_100M:
1575 bp->line_speed = SPEED_100;
1576 if (bp->duplex == DUPLEX_FULL)
1577 bp->link_status |= LINK_100TXFD;
1579 bp->link_status |= LINK_100TXHD;
1583 case GP_STATUS_1G_KX:
1584 bp->line_speed = SPEED_1000;
1585 if (bp->duplex == DUPLEX_FULL)
1586 bp->link_status |= LINK_1000TFD;
1588 bp->link_status |= LINK_1000THD;
1591 case GP_STATUS_2_5G:
1592 bp->line_speed = SPEED_2500;
1593 if (bp->duplex == DUPLEX_FULL)
1594 bp->link_status |= LINK_2500TFD;
1596 bp->link_status |= LINK_2500THD;
/* speeds below are full-duplex only */
1601 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1605 case GP_STATUS_10G_KX4:
1606 case GP_STATUS_10G_HIG:
1607 case GP_STATUS_10G_CX4:
1608 bp->line_speed = SPEED_10000;
1609 bp->link_status |= LINK_10GTFD;
1612 case GP_STATUS_12G_HIG:
1613 bp->line_speed = SPEED_12000;
1614 bp->link_status |= LINK_12GTFD;
1617 case GP_STATUS_12_5G:
1618 bp->line_speed = SPEED_12500;
1619 bp->link_status |= LINK_12_5GTFD;
1623 bp->line_speed = SPEED_13000;
1624 bp->link_status |= LINK_13GTFD;
1628 bp->line_speed = SPEED_15000;
1629 bp->link_status |= LINK_15GTFD;
1633 bp->line_speed = SPEED_16000;
1634 bp->link_status |= LINK_16GTFD;
1638 BNX2X_ERR("link speed unsupported gp_status 0x%x\n",
1643 bp->link_status |= LINK_STATUS_SERDES_LINK;
/* report autoneg progress to management firmware via link_status */
1645 if (bp->req_autoneg & AUTONEG_SPEED) {
1646 bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
1648 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
1650 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
1652 if (bp->autoneg & AUTONEG_PARALLEL)
1654 LINK_STATUS_PARALLEL_DETECTION_USED;
1657 if (bp->flow_ctrl & FLOW_CTRL_TX)
1658 bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
1660 if (bp->flow_ctrl & FLOW_CTRL_RX)
1661 bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
1663 } else { /* link_down */
1664 DP(NETIF_MSG_LINK, "link down\n");
/* default duplex while the link is down */
1669 bp->duplex = DUPLEX_FULL;
1673 DP(NETIF_MSG_LINK, "gp_status 0x%x link_up %d\n"
1674 DP_LEVEL " line_speed %d duplex %d flow_ctrl 0x%x"
1675 " link_status 0x%x\n",
1676 gp_status, bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl,
/* Acknowledge a NIG link-change interrupt for this port.  All three
 * link status bits (XGXS, SerDes, 10G) are cleared first; then, on
 * link-up, the interrupt matching the active link type (@is_10g /
 * XGXS lane / SerDes) is masked by writing 1 to its status bit so it
 * does not re-fire while the link stays up.
 */
1680 static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
1682 int port = bp->port;
1684 /* first reset all status
1685 * we assume only one line will be change at a time */
1686 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1687 (NIG_XGXS0_LINK_STATUS |
1688 NIG_SERDES0_LINK_STATUS |
1689 NIG_STATUS_INTERRUPT_XGXS0_LINK10G));
1692 /* Disable the 10G link interrupt
1693 * by writing 1 to the status register
1695 DP(NETIF_MSG_LINK, "10G XGXS link up\n");
1697 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1698 NIG_STATUS_INTERRUPT_XGXS0_LINK10G);
1700 } else if (bp->phy_flags & PHY_XGXS_FLAG) {
1701 /* Disable the link interrupt
1702 * by writing 1 to the relevant lane
1703 * in the status register
1705 DP(NETIF_MSG_LINK, "1G XGXS link up\n");
/* select the status bit of the serdes master lane */
1707 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1708 ((1 << bp->ser_lane) <<
1709 NIG_XGXS0_LINK_STATUS_SIZE));
1711 } else { /* SerDes */
1712 DP(NETIF_MSG_LINK, "SerDes link up\n");
1713 /* Disable the link interrupt
1714 * by writing 1 to the status register
1717 NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
1718 NIG_SERDES0_LINK_STATUS);
1721 } else { /* link_down */
/* Query the external PHY (if any) for link status.  For XGXS ports the
 * MDIO address is temporarily switched to the external PHY address from
 * ext_phy_config, the type-specific status registers are read (LASI is
 * read twice since it is latched/clear-on-read), and the original PHY
 * address is restored.  Direct (no external PHY) configurations are
 * treated as link-up by the PHY-type switch.
 * NOTE(review): the return statements are outside this excerpt;
 * presumably returns nonzero when the external PHY reports link.
 */
1725 static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
1731 u32 rx_sd, pcs_status;
1733 if (bp->phy_flags & PHY_XGXS_FLAG) {
/* temporarily address the external PHY instead of the internal one */
1734 local_phy = bp->phy_addr;
1735 ext_phy_addr = ((bp->ext_phy_config &
1736 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
1737 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
1738 bp->phy_addr = (u8)ext_phy_addr;
1740 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
1741 switch (ext_phy_type) {
1742 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
1743 DP(NETIF_MSG_LINK, "XGXS Direct\n");
1747 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
1748 DP(NETIF_MSG_LINK, "XGXS 8705\n");
/* LASI status is latched - read twice to get current state */
1749 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
1750 EXT_PHY_OPT_LASI_STATUS, &val);
1751 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
1753 bnx2x_mdio45_read(bp, EXT_PHY_OPT_WIS_DEVAD,
1754 EXT_PHY_OPT_LASI_STATUS, &val);
1755 DP(NETIF_MSG_LINK, "8705 LASI status is %d\n", val);
1757 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1758 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
/* link = RX signal detect (bit 0) */
1759 val = (rx_sd & 0x1);
1762 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
1763 DP(NETIF_MSG_LINK, "XGXS 8706\n");
/* LASI status is latched - read twice to get current state */
1764 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1765 EXT_PHY_OPT_LASI_STATUS, &val);
1766 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
1768 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1769 EXT_PHY_OPT_LASI_STATUS, &val);
1770 DP(NETIF_MSG_LINK, "8706 LASI status is %d\n", val);
1772 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
1773 EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
1774 bnx2x_mdio45_read(bp, EXT_PHY_OPT_PCS_DEVAD,
1775 EXT_PHY_OPT_PCS_STATUS, &pcs_status);
1776 DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
1777 " pcs_status 0x%x\n", rx_sd, pcs_status);
1778 /* link is up if both bit 0 of pmd_rx and
1779 * bit 0 of pcs_status are set
1781 val = (rx_sd & pcs_status);
1785 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
1786 bp->ext_phy_config);
/* restore the internal PHY address saved above */
1790 bp->phy_addr = local_phy;
1792 } else { /* SerDes */
1793 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
1794 switch (ext_phy_type) {
1795 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
1796 DP(NETIF_MSG_LINK, "SerDes Direct\n");
1800 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
1801 DP(NETIF_MSG_LINK, "SerDes 5482\n");
1806 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
1807 bp->ext_phy_config);
/* Bring up the BigMAC (used for 10G+ speeds) on this port:
 * reset/unreset the block, program the station MAC address, TX/RX
 * control (pause per bp->flow_ctrl), jumbo max-frame sizes and SAFC,
 * then steer the NIG away from the EMAC and onto the BMAC.
 * @is_lb: nonzero enables MAC-level loopback.
 * Side effects: sets PHY_BMAC_FLAG, clears old BMAC stats and enables
 * statistics collection.
 */
1816 static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
1818 int port = bp->port;
1819 u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
1820 NIG_REG_INGRESS_BMAC0_MEM;
1824 DP(NETIF_MSG_LINK, "enabling BigMAC\n");
1825 /* reset and unreset the BigMac */
1826 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1827 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1829 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1830 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
1832 /* enable access for bmac registers */
1833 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
1838 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
/* program the station address: wb_write[0] = low 4 bytes,
 * wb_write[1] = high 2 bytes (BMAC registers are 64-bit wide) */
1842 wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
1843 (bp->dev->dev_addr[3] << 16) |
1844 (bp->dev->dev_addr[4] << 8) |
1845 bp->dev->dev_addr[5]);
1846 wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
1847 bp->dev->dev_addr[1]);
1848 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
1853 if (bp->flow_ctrl & FLOW_CTRL_TX)
1857 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);
1860 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
1862 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);
1868 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
1872 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
1875 /* rx control set to don't strip crc */
1877 if (bp->flow_ctrl & FLOW_CTRL_RX)
1881 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);
1884 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
1886 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);
1888 /* set cnt max size */
1889 wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
1891 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
1894 /* configure safc */
1895 wb_write[0] = 0x1000200;
1897 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
1900 /* fix for emulation */
1901 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1902 wb_write[0] = 0xf000;
1905 bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
1909 /* reset old bmac stats */
1910 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
1912 NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);
/* select XGXS (not SerDes) mode in the NIG for this port */
1915 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
1916 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
1918 /* disable the NIG in/out to the emac */
1919 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
1920 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
1921 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
1923 /* enable the NIG in/out to the bmac */
1924 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
1926 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
1928 if (bp->flow_ctrl & FLOW_CTRL_TX)
1930 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
1931 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
1933 bp->phy_flags |= PHY_BMAC_FLAG;
1935 bp->stats_state = STATS_STATE_ENABLE;
/* Bring up the EMAC (used for speeds up to 2.5G) on this port:
 * reset/unreset the core, wait for the self-clearing mode and TX-mode
 * resets, configure pause per bp->flow_ctrl, promiscuous+keep-VLAN RX
 * mode, magic-packet detection, jumbo MTU and the station address,
 * then steer the NIG from the BMAC onto the EMAC.  Emulation/FPGA
 * chip revisions take special lane/mode settings.
 * Side effects: sets PHY_EMAC_FLAG and enables statistics collection.
 */
1938 static void bnx2x_emac_enable(struct bnx2x *bp)
1940 int port = bp->port;
1941 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
1945 DP(NETIF_MSG_LINK, "enabling EMAC\n");
1946 /* reset and unreset the emac core */
1947 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
1948 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1950 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
1951 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
1953 /* enable emac and not bmac */
1954 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
/* emulation and FPGA platforms need fixed lane selection */
1957 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
1958 /* Use lane 1 (of lanes 0-3) */
1959 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1960 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1963 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
1964 /* Use lane 1 (of lanes 0-3) */
1965 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
1966 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1970 if (bp->phy_flags & PHY_XGXS_FLAG) {
1971 DP(NETIF_MSG_LINK, "XGXS\n");
1972 /* select the master lanes (out of 0-3) */
1973 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
1976 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
1978 } else { /* SerDes */
1979 DP(NETIF_MSG_LINK, "SerDes\n");
1981 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
1986 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
1988 /* init emac - use read-modify-write */
1989 /* self clear reset */
1990 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1991 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
/* wait for the self-clearing RESET bit to drop (bounded wait -
 * loop bound/udelay lines not visible in this excerpt) */
1994 while (val & EMAC_MODE_RESET) {
1995 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
1996 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
1998 BNX2X_ERR("EMAC timeout!\n");
2005 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
/* same self-clearing wait for the TX-mode reset */
2008 while (val & EMAC_TX_MODE_RESET) {
2009 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2010 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2012 BNX2X_ERR("EMAC timeout!\n");
2018 if (CHIP_REV_IS_SLOW(bp)) {
2019 /* config GMII mode */
2020 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2021 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2024 /* pause enable/disable */
2025 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2026 EMAC_RX_MODE_FLOW_EN);
2027 if (bp->flow_ctrl & FLOW_CTRL_RX)
2028 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2029 EMAC_RX_MODE_FLOW_EN);
2031 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2032 EMAC_TX_MODE_EXT_PAUSE_EN);
2033 if (bp->flow_ctrl & FLOW_CTRL_TX)
2034 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2035 EMAC_TX_MODE_EXT_PAUSE_EN);
2038 /* KEEP_VLAN_TAG, promiscuous */
2039 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2040 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2041 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2043 /* identify magic packets */
2044 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2045 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2047 /* enable emac for jumbo packets */
2048 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2049 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2050 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2053 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
/* station address: MATCH holds the high 2 bytes, MATCH+4 the low 4 */
2055 val = ((bp->dev->dev_addr[0] << 8) |
2056 bp->dev->dev_addr[1]);
2057 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2059 val = ((bp->dev->dev_addr[2] << 24) |
2060 (bp->dev->dev_addr[3] << 16) |
2061 (bp->dev->dev_addr[4] << 8) |
2062 bp->dev->dev_addr[5]);
2063 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2065 /* disable the NIG in/out to the bmac */
2066 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2067 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2068 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2070 /* enable the NIG in/out to the emac */
2071 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2073 if (bp->flow_ctrl & FLOW_CTRL_TX)
2075 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2076 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2078 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2079 /* take the BigMac out of reset */
2080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2081 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2083 /* enable access for bmac registers */
2084 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2087 bp->phy_flags |= PHY_EMAC_FLAG;
2089 bp->stats_state = STATS_STATE_ENABLE;
/* Program the EMAC mode register for the negotiated/forced speed and
 * duplex: first clear all speed/duplex mode bits, then set the bits
 * matching bp->line_speed and bp->duplex, and update the port LEDs.
 * 10G is rejected here since it is handled by the BMAC, not the EMAC.
 */
2092 static void bnx2x_emac_program(struct bnx2x *bp)
2095 int port = bp->port;
2097 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
/* clear previous speed/duplex selection before setting the new one */
2098 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2099 (EMAC_MODE_25G_MODE |
2100 EMAC_MODE_PORT_MII_10M |
2101 EMAC_MODE_HALF_DUPLEX));
2102 switch (bp->line_speed) {
2104 mode |= EMAC_MODE_PORT_MII_10M;
2108 mode |= EMAC_MODE_PORT_MII;
2112 mode |= EMAC_MODE_PORT_GMII;
2116 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2120 /* 10G not valid for EMAC */
2121 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2125 if (bp->duplex == DUPLEX_HALF)
2126 mode |= EMAC_MODE_HALF_DUPLEX;
2127 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2130 bnx2x_leds_set(bp, bp->line_speed);
/* Update the TX driver pre-emphasis from the value the link partner
 * advertised in the OVER_1G LP_UP2 register.  The partner's
 * pre-emphasis bits [10:7] are repositioned to the TX_DRIVER
 * register's [15:12] field and written only when non-zero and
 * different from the current setting, avoiding redundant MDIO writes.
 */
2133 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2139 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2140 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2142 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2143 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2145 /* bits [10:7] at lp_up2, positioned at [15:12] */
2146 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2147 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2148 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2150 if ((lp_up2 != 0) &&
2151 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2152 /* replace tx_driver bits [15:12] */
2153 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2154 tx_driver |= lp_up2;
2155 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
/* Reconfigure the PBF (packet buffer) for the current link speed and
 * pause settings.  Sequence: stop new task processing, wait for the
 * credit counter to return to its init value (bounded poll), program
 * pause enable, threshold and a speed-dependent init credit, pulse
 * INIT to latch the new credit, and re-enable task processing.
 */
2159 static void bnx2x_pbf_update(struct bnx2x *bp)
2161 int port = bp->port;
/* quiesce the PBF before touching credits */
2167 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2169 /* wait for init credit */
2170 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2171 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2172 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
/* bounded poll: in-flight packets return credits as they drain */
2174 while ((init_crd != crd) && count) {
2177 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2180 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2181 if (init_crd != crd)
2182 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2184 if (bp->flow_ctrl & FLOW_CTRL_RX)
2186 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2188 /* update threshold */
2189 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2190 /* update init credit */
2191 init_crd = 778; /* (800-18-4) */
2194 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2196 /* update threshold */
2197 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2198 /* update init credit */
/* credit scales with speed: higher speed needs deeper buffering */
2199 switch (bp->line_speed) {
2203 init_crd = thresh + 55 - 22;
2207 init_crd = thresh + 138 - 22;
2211 init_crd = thresh + 553 - 22;
2215 BNX2X_ERR("Invalid line_speed 0x%x\n",
2220 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2221 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2222 bp->line_speed, init_crd);
2224 /* probe the credit changes */
2225 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2227 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
/* resume task processing with the new configuration */
2230 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
/* Publish the current link status to management firmware via the
 * per-port driver/firmware mailbox in shared memory.
 */
2233 static void bnx2x_update_mng(struct bnx2x *bp)
2236 SHMEM_WR(bp, drv_fw_mb[bp->port].link_status,
/* Report the link state to the user: toggle the netdev carrier and
 * print a one-line summary (speed, duplex, flow-control direction)
 * on link-up, or a link-down message otherwise.
 */
2240 static void bnx2x_link_report(struct bnx2x *bp)
2243 netif_carrier_on(bp->dev);
2244 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2246 printk("%d Mbps ", bp->line_speed);
2248 if (bp->duplex == DUPLEX_FULL)
2249 printk("full duplex");
2251 printk("half duplex");
2253 if (bp->flow_ctrl) {
2254 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2255 printk(", receive ");
2256 if (bp->flow_ctrl & FLOW_CTRL_TX)
2257 printk("& transmit ");
2259 printk(", transmit ");
2261 printk("flow control ON");
2265 } else { /* link_down */
2266 netif_carrier_off(bp->dev);
2267 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
/* Finish a link-up transition: adapt the PBF credits to the new
 * speed, release the NIG egress drain, publish the new state to
 * management firmware, and report the link to the user.
 */
2271 static void bnx2x_link_up(struct bnx2x *bp)
2273 int port = bp->port;
2276 bnx2x_pbf_update(bp);
/* stop draining egress traffic now that the link can carry it */
2279 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2281 /* update shared memory */
2282 bnx2x_update_mng(bp);
2284 /* indicate link up */
2285 bnx2x_link_report(bp);
/* Handle a link-down transition: stop statistics collection, clear
 * the active-MAC flags, put the BMAC in reset, drain NIG egress so
 * queued packets are discarded instead of stalling, then publish the
 * state to management firmware and report it to the user.
 */
2288 static void bnx2x_link_down(struct bnx2x *bp)
2290 int port = bp->port;
2293 if (bp->stats_state != STATS_STATE_DISABLE) {
2294 bp->stats_state = STATS_STATE_STOP;
2295 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2298 /* indicate link down */
2299 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
/* hold the BigMAC in reset while the link is down */
2302 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2303 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2305 /* ignore drain flag interrupt */
2306 /* activate nig drain */
2307 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2309 /* update shared memory */
2310 bnx2x_update_mng(bp);
2312 /* indicate link down */
2313 bnx2x_link_report(bp);
2316 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2318 /* This function is called upon link interrupt */
/* Link-change interrupt handler.  Reads the XGXS GP status (re-read
 * in a short loop to ride out fast toggling), decodes it into driver
 * link state, acks the NIG interrupt, and on link-up (both internal
 * and external PHY) enables the appropriate MAC: BMAC for 10G-16G,
 * EMAC otherwise.  Finishes by reinitializing MAC statistics.
 */
2319 static void bnx2x_link_update(struct bnx2x *bp)
2322 int port = bp->port;
2326 DP(NETIF_MSG_LINK, "port %x, is xgxs %x, stat_mask 0x%x,"
2327 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2328 " 10G %x, XGXS_LINK %x\n", port, (bp->phy_flags & PHY_XGXS_FLAG),
2329 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2330 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2331 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2332 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2333 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2334 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2338 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2339 /* avoid fast toggling */
2340 for (i = 0 ; i < 10 ; i++) {
2342 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2346 bnx2x_link_settings_status(bp, gp_status);
2348 /* anything 10 and over uses the bmac */
2349 link_10g = ((bp->line_speed >= SPEED_10000) &&
2350 (bp->line_speed <= SPEED_16000));
2352 bnx2x_link_int_ack(bp, link_10g);
2354 /* link is up only if both local phy and external phy are up */
2355 if (bp->link_up && bnx2x_ext_phy_is_link_up(bp)) {
2357 bnx2x_bmac_enable(bp, 0);
2358 bnx2x_leds_set(bp, SPEED_10000);
/* sub-10G speeds run through the EMAC instead */
2361 bnx2x_emac_enable(bp);
2362 bnx2x_emac_program(bp);
2365 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2366 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2367 bnx2x_set_sgmii_tx_driver(bp);
2372 } else { /* link down */
2373 bnx2x_leds_unset(bp);
2374 bnx2x_link_down(bp);
/* restart stats with the newly selected MAC */
2377 bnx2x_init_mac_stats(bp);
2381 * Init service functions
/* Program the AER (Address Expansion Register) MMD select: for XGXS
 * the per-lane offset (phy_addr + ser_lane) is added to the 0x3800
 * base; SerDes uses offset 0.
 */
2384 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2386 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2387 (bp->phy_addr + bp->ser_lane) : 0;
2389 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2390 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
/* Set the master lane used for autoneg in the XGXS TEST_MODE_LANE
 * register to bp->ser_lane (read-modify-write; the masking of the old
 * lane field is in lines not visible in this excerpt).
 */
2393 static void bnx2x_set_master_ln(struct bnx2x *bp)
2397 /* set the master_ln for AN */
2398 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2399 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2401 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2402 (new_master_ln | bp->ser_lane));
/* Reset the serdes unicore via the MII control register and wait for
 * the self-clearing RESET bit to drop (bounded by
 * MDIO_ACCESS_TIMEOUT iterations); logs an error if it never clears.
 * The register bank must be re-selected inside the poll loop because
 * the reset clears the bank selection.
 */
2405 static void bnx2x_reset_unicore(struct bnx2x *bp)
2410 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2411 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2412 /* reset the unicore */
2413 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2414 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2416 /* wait for the reset to self clear */
2417 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2420 /* the reset erased the previous bank value */
2421 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2422 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2425 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2431 BNX2X_ERR("BUG! unicore is still in reset!\n");
/* Program RX/TX lane swapping in XGXS_BLOCK2.  A swap value of 0x1b
 * encodes the identity mapping 0123 (two bits per lane), in which
 * case the swap register is cleared instead of enabled.
 */
2434 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
2436 /* Each two bits represents a lane number:
2437 No swap is 0123 => 0x1b no need to enable the swap */
2439 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2440 if (bp->rx_lane_swap != 0x1b) {
2441 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2443 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2444 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2446 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2449 if (bp->tx_lane_swap != 0x1b) {
2450 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2452 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2454 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
/* Enable or disable parallel detection (fallback when the partner
 * does not autonegotiate) according to bp->autoneg & AUTONEG_PARALLEL,
 * in the 1G SERDES_DIGITAL control and - for XGXS - also in the 10G
 * parallel-detect control bank.
 */
2458 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2462 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2463 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2466 if (bp->autoneg & AUTONEG_PARALLEL) {
2467 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2469 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2471 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2474 if (bp->phy_flags & PHY_XGXS_FLAG) {
2475 DP(NETIF_MSG_LINK, "XGXS\n");
2476 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
/* program the 10G parallel-detect link counter threshold */
2478 bnx2x_mdio22_write(bp,
2479 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2480 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2482 bnx2x_mdio22_read(bp,
2483 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2486 if (bp->autoneg & AUTONEG_PARALLEL) {
2488 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2491 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2493 bnx2x_mdio22_write(bp,
2494 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
/* Configure all autoneg-related unicore registers from bp->req_autoneg
 * and the bp->autoneg mode mask: CL37 enable in MII control, SGMII
 * fiber autodetection, TetonII/BAM next-page autoneg, and CL73 autoneg
 * (BAM station manager, CL73/CL37 resolution merge, advertised CL73
 * speed: 10G-KX4 for XGXS, 1G-KX for SerDes).
 */
2499 static void bnx2x_set_autoneg(struct bnx2x *bp)
2504 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2505 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2506 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2507 (bp->autoneg & AUTONEG_CL37)) {
2508 /* CL37 Autoneg Enabled */
2509 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2511 /* CL37 Autoneg Disabled */
2512 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2513 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2515 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2517 /* Enable/Disable Autodetection */
2518 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2519 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2520 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2522 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2523 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2524 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2526 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2528 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2530 /* Enable TetonII and BAM autoneg */
2531 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2532 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2534 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2535 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2536 /* Enable BAM aneg Mode and TetonII aneg Mode */
2537 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2538 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2540 /* TetonII and BAM Autoneg Disabled */
2541 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2542 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2544 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2547 /* Enable Clause 73 Aneg */
2548 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2549 (bp->autoneg & AUTONEG_CL73)) {
2550 /* Enable BAM Station Manager */
2551 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2552 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2553 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2554 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2555 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2557 /* Merge CL73 and CL37 aneg resolution */
2558 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2560 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2562 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2564 /* Set the CL73 AN speed */
2565 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
2566 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
2567 /* In the SerDes we support only the 1G.
2568 In the XGXS we support the 10G KX4
2569 but we currently do not support the KR */
2570 if (bp->phy_flags & PHY_XGXS_FLAG) {
2571 DP(NETIF_MSG_LINK, "XGXS\n");
2573 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2575 DP(NETIF_MSG_LINK, "SerDes\n");
2577 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2579 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2581 /* CL73 Autoneg Enabled */
2582 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2584 /* CL73 Autoneg Disabled */
2587 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2588 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2591 /* program SerDes, forced speed */
/* Program the unicore for a forced (non-autoneg) speed: set duplex
 * and disable autoneg in MII control, and for requested speeds above
 * 1G select the forced-speed mode in SERDES_DIGITAL MISC1 (10G maps
 * to 10G-CX4 with a 156.25 MHz reference clock).
 */
2592 static void bnx2x_program_serdes(struct bnx2x *bp)
2596 /* program duplex, disable autoneg */
2597 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2598 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2599 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2600 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2601 if (bp->req_duplex == DUPLEX_FULL)
2602 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2603 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2606 - needed only if the speed is greater than 1G (2.5G or 10G) */
2607 if (bp->req_line_speed > SPEED_1000) {
2608 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2609 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2610 /* clearing the speed value before setting the right speed */
2611 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2612 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2613 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2614 if (bp->req_line_speed == SPEED_10000)
2616 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2617 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
/* Advertise Broadcom-proprietary CL37/BAM extended capabilities:
 * set 2.5G and/or 10G bits in OVER_1G UP1 per bp->advertising, and
 * clear UP3 (no further extended pages advertised).
 */
2621 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
2625 /* configure the 48 bits for BAM AN */
2626 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2628 /* set extended capabilities */
2629 if (bp->advertising & ADVERTISED_2500baseT_Full)
2630 val |= MDIO_OVER_1G_UP1_2_5G;
2631 if (bp->advertising & ADVERTISED_10000baseT_Full)
2632 val |= MDIO_OVER_1G_UP1_10G;
2633 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
2635 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
/* Build and write the IEEE CL37 autoneg advertisement: always full
 * duplex, plus the pause capability bits selected by bp->pause_mode
 * (symmetric / asymmetric / both / none).
 */
2638 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
2642 /* for AN, we are always publishing full duplex */
2643 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
2646 switch (bp->pause_mode) {
2647 case PAUSE_SYMMETRIC:
2648 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
2650 case PAUSE_ASYMMETRIC:
2651 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
2654 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
2657 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
2661 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2662 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
/* Restart autonegotiation: clause 73 when enabled in bp->autoneg,
 * otherwise BAM/CL37 via the combo MII control register.  Both paths
 * read the current AN control value, then write it back with the
 * enable + restart bits set.
 */
2665 static void bnx2x_restart_autoneg(struct bnx2x *bp)
2667 if (bp->autoneg & AUTONEG_CL73) {
2668 /* enable and restart clause 73 aneg */
2671 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2672 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2674 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
2676 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
2677 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
2680 /* Enable and restart BAM/CL37 aneg */
2683 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2684 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2686 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2688 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2689 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
/* Configure the unicore for SGMII operation.  The unicore is always
 * the SGMII slave: clear the fiber/autodetect/master bits in the
 * SERDES_DIGITAL 1000X control register, then either program a forced
 * speed/duplex in the combo MII control register (when AUTONEG_SPEED
 * is not requested) or enable and restart autoneg.
 */
2693 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
2697 /* in SGMII mode, the unicore is always slave */
2698 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2699 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2701 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
2702 /* set sgmii mode (and not fiber) */
2703 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
2704 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
2705 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
2706 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
2709 /* if forced speed */
2710 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2711 /* set speed, disable autoneg */
2714 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2715 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2717 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2718 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
2719 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
/* NOTE(review): the case labels (SPEED_100 / SPEED_1000 / SPEED_10 /
 * default) are absent from this extract; only the case bodies remain.
 */
2721 switch (bp->req_line_speed) {
2724 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
2728 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
2731 /* there is nothing to set for 10M */
2734 /* invalid speed for SGMII */
2735 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
2736 bp->req_line_speed);
2740 /* setting the full duplex */
2741 if (bp->req_duplex == DUPLEX_FULL)
2743 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2744 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2747 } else { /* AN mode */
2748 /* enable and restart AN */
2749 bnx2x_restart_autoneg(bp);
/* Enable link-state interrupts in the NIG for this port: first clear
 * the pending XGXS/SerDes link status bits, then unmask either the
 * XGXS (1G + 10G) or the SerDes link interrupt depending on
 * bp->phy_flags.
 */
2753 static void bnx2x_link_int_enable(struct bnx2x *bp)
2755 int port = bp->port;
2757 /* setting the status to report on link up
2758 for either XGXS or SerDes */
2759 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
2760 (NIG_XGXS0_LINK_STATUS |
2761 NIG_STATUS_INTERRUPT_XGXS0_LINK10G |
2762 NIG_SERDES0_LINK_STATUS));
2764 if (bp->phy_flags & PHY_XGXS_FLAG) {
2766 * in force mode (not AN) we can enable just the relevant
2768 * Even in AN we might enable only one according to the AN
2771 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2772 (NIG_MASK_XGXS0_LINK_STATUS |
2773 NIG_MASK_XGXS0_LINK10G));
2774 DP(NETIF_MSG_LINK, "enable XGXS interrupt\n");
2776 } else { /* SerDes */
2777 bnx2x_bits_en(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2778 NIG_MASK_SERDES0_LINK_STATUS);
2779 DP(NETIF_MSG_LINK, "enable SerDes interrupt\n");
/* Initialize the external PHY, if any, and enable its link-state
 * interrupt.  Dispatches on the external PHY type read from
 * bp->ext_phy_config.  For the 8705/8706 the MDIO address is
 * temporarily switched (bp->phy_addr = ext phy) while the clause-45
 * init writes are issued, then restored to the local PHY address.
 */
2783 static void bnx2x_ext_phy_init(struct bnx2x *bp)
2785 int port = bp->port;
2790 if (bp->phy_flags & PHY_XGXS_FLAG) {
2791 local_phy = bp->phy_addr;
2792 ext_phy_addr = ((bp->ext_phy_config &
2793 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2794 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2796 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2797 switch (ext_phy_type) {
2798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2799 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2802 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2803 DP(NETIF_MSG_LINK, "XGXS 8705\n");
2805 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2807 DP(NETIF_MSG_LINK, "enabled external phy int\n");
/* NOTE(review): phy_addr is set to ext_phy_type here rather than
 * ext_phy_addr -- could not confirm intent from this extract.
 */
2809 bp->phy_addr = ext_phy_type;
2810 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2811 EXT_PHY_OPT_PMD_MISC_CNTL,
2813 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2814 EXT_PHY_OPT_PHY_IDENTIFIER,
2816 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2817 EXT_PHY_OPT_CMU_PLL_BYPASS,
2819 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_WIS_DEVAD,
2820 EXT_PHY_OPT_LASI_CNTL, 0x1);
2823 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2824 DP(NETIF_MSG_LINK, "XGXS 8706\n");
2826 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2828 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2830 bp->phy_addr = ext_phy_type;
2831 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2832 EXT_PHY_OPT_PMD_DIGITAL_CNT,
2834 bnx2x_mdio45_vwrite(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2835 EXT_PHY_OPT_LASI_CNTL, 0x1);
2839 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2840 bp->ext_phy_config);
/* restore the local (unicore) PHY address after talking to the
 * external PHY */
2843 bp->phy_addr = local_phy;
2845 } else { /* SerDes */
2846 /* ext_phy_addr = ((bp->ext_phy_config &
2847 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
2848 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
2850 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2851 switch (ext_phy_type) {
2852 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2853 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2856 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2857 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2859 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2861 DP(NETIF_MSG_LINK, "enabled external phy int\n");
2865 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2866 bp->ext_phy_config);
/* Reset the external PHY (if present).  For the 8705/8706 this issues
 * a clause-45 control-register write of 0xa040 at the external PHY's
 * MDIO address (phy_addr is swapped in and back out around the write).
 * Direct-attach and 5482 types need no MDIO reset here.
 */
2872 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
2878 if (bp->phy_flags & PHY_XGXS_FLAG) {
2879 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
2880 switch (ext_phy_type) {
2881 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
2882 DP(NETIF_MSG_LINK, "XGXS Direct\n");
2885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
2886 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
2887 DP(NETIF_MSG_LINK, "XGXS 8705/6\n");
2888 local_phy = bp->phy_addr;
2889 ext_phy_addr = ((bp->ext_phy_config &
2890 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
2891 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
2892 bp->phy_addr = (u8)ext_phy_addr;
/* 0xa040: reset bit plus default control bits for the external PHY --
 * TODO confirm against the 8705/8706 datasheet */
2893 bnx2x_mdio45_write(bp, EXT_PHY_OPT_PMA_PMD_DEVAD,
2894 EXT_PHY_OPT_CNTL, 0xa040);
2895 bp->phy_addr = local_phy;
2899 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
2900 bp->ext_phy_config);
2904 } else { /* SerDes */
2905 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
2906 switch (ext_phy_type) {
2907 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
2908 DP(NETIF_MSG_LINK, "SerDes Direct\n");
2911 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
2912 DP(NETIF_MSG_LINK, "SerDes 5482\n");
2916 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
2917 bp->ext_phy_config);
/* Main link bring-up sequence: mask NIG attentions, reset the external
 * PHY and the unicore, configure lane mastership/swap for XGXS,
 * optionally enable parallel detection, then configure the core for
 * either 1000X (forced or autoneg) or SGMII operation.  Finishes by
 * re-enabling the link interrupt and initializing the external PHY.
 */
2923 static void bnx2x_link_initialize(struct bnx2x *bp)
2925 int port = bp->port;
2927 /* disable attentions */
2928 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
2929 (NIG_MASK_XGXS0_LINK_STATUS |
2930 NIG_MASK_XGXS0_LINK10G |
2931 NIG_MASK_SERDES0_LINK_STATUS |
2934 bnx2x_ext_phy_reset(bp);
2936 bnx2x_set_aer_mmd(bp);
2938 if (bp->phy_flags & PHY_XGXS_FLAG)
2939 bnx2x_set_master_ln(bp);
2941 /* reset the SerDes and wait for reset bit return low */
2942 bnx2x_reset_unicore(bp);
2944 bnx2x_set_aer_mmd(bp);
2946 /* setting the masterLn_def again after the reset */
2947 if (bp->phy_flags & PHY_XGXS_FLAG) {
2948 bnx2x_set_master_ln(bp);
2949 bnx2x_set_swap_lanes(bp);
2952 /* Set Parallel Detect */
2953 if (bp->req_autoneg & AUTONEG_SPEED)
2954 bnx2x_set_parallel_detection(bp);
/* A forced speed below 1G on an XGXS core means SGMII operation */
2956 if (bp->phy_flags & PHY_XGXS_FLAG) {
2957 if (bp->req_line_speed &&
2958 bp->req_line_speed < SPEED_1000) {
2959 bp->phy_flags |= PHY_SGMII_FLAG;
2961 bp->phy_flags &= ~PHY_SGMII_FLAG;
2965 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
/* program the rx equalizer boost from the hw config for every rx
 * lane bank (RX0..RX_ALL) */
2968 rx_eq = ((bp->serdes_config &
2969 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
2970 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
2972 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
2973 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
2974 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
2975 MDIO_SET_REG_BANK(bp, bank);
2976 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
2978 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
2979 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
2982 /* forced speed requested? */
2983 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
2984 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
2986 /* disable autoneg */
2987 bnx2x_set_autoneg(bp);
2989 /* program speed and duplex */
2990 bnx2x_program_serdes(bp);
2992 } else { /* AN_mode */
2993 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
2996 bnx2x_set_brcm_cl37_advertisment(bp);
2998 /* program duplex & pause advertisement (for aneg) */
2999 bnx2x_set_ieee_aneg_advertisment(bp);
3001 /* enable autoneg */
3002 bnx2x_set_autoneg(bp);
3004 /* enable and restart AN */
3005 bnx2x_restart_autoneg(bp);
3008 } else { /* SGMII mode */
3009 DP(NETIF_MSG_LINK, "SGMII\n");
3011 bnx2x_initialize_sgmii_process(bp);
3014 /* enable the interrupt */
3015 bnx2x_link_int_enable(bp);
3017 /* init ext phy and enable link state int */
3018 bnx2x_ext_phy_init(bp);
/* Pulse the SerDes/XGXS block reset for this port: write the
 * port-shifted reset bits to the MISC CLEAR register (assert reset)
 * and then to the SET register (deassert).
 */
3021 static void bnx2x_phy_deassert(struct bnx2x *bp)
3023 int port = bp->port;
3026 if (bp->phy_flags & PHY_XGXS_FLAG) {
3027 DP(NETIF_MSG_LINK, "XGXS\n");
3028 val = XGXS_RESET_BITS;
3030 } else { /* SerDes */
3031 DP(NETIF_MSG_LINK, "SerDes\n");
3032 val = SERDES_RESET_BITS;
/* each port owns 16 bits of the reset register */
3035 val = val << (port*16);
3037 /* reset and unreset the SerDes/XGXS */
3038 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3040 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
/* Top-level PHY init.  On FPGA/emulation chips there is no real PHY:
 * fake a 10G full-duplex link, enable the EMAC (FPGA) or BMAC (EMUL)
 * directly and report link up.  On real silicon, deassert the
 * SerDes/XGXS reset and run the full link initialization.
 * NOTE(review): return statements are absent from this extract.
 */
3043 static int bnx2x_phy_init(struct bnx2x *bp)
3045 DP(NETIF_MSG_LINK, "started\n");
3046 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3047 bp->phy_flags |= PHY_EMAC_FLAG;
3049 bp->line_speed = SPEED_10000;
3050 bp->duplex = DUPLEX_FULL;
3051 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3052 bnx2x_emac_enable(bp);
3053 bnx2x_link_report(bp);
3056 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3057 bp->phy_flags |= PHY_BMAC_FLAG;
3059 bp->line_speed = SPEED_10000;
3060 bp->duplex = DUPLEX_FULL;
3061 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3062 bnx2x_bmac_enable(bp, 0);
3063 bnx2x_link_report(bp);
3067 bnx2x_phy_deassert(bp);
3068 bnx2x_link_initialize(bp);
/* Tear the link down: mask NIG link attentions, reset the external
 * PHY, hold the SerDes/XGXS in reset, disable the BMAC/EMAC NIG
 * interfaces and turn on egress drain for the port.
 */
3074 static void bnx2x_link_reset(struct bnx2x *bp)
3076 int port = bp->port;
3078 /* disable attentions */
3079 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3080 (NIG_MASK_XGXS0_LINK_STATUS |
3081 NIG_MASK_XGXS0_LINK10G |
3082 NIG_MASK_SERDES0_LINK_STATUS |
3085 bnx2x_ext_phy_reset(bp);
3087 /* reset the SerDes/XGXS */
3088 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3089 (0x1ff << (port*16)));
3091 /* reset EMAC / BMAC and disable NIG interfaces */
3092 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3093 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3095 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
3096 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3097 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3099 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3102 #ifdef BNX2X_XGXS_LB
/* Debug-only (compiled under BNX2X_XGXS_LB): put the XGXS core into
 * loopback.  10G path: temporarily redirect the NIG MDIO devad,
 * select the AER bank, set loopback in the CL73 AN control register,
 * then restore the AER mmd and the NIG devad.  1G path: set the
 * loopback bit in the combo MII control register.
 */
3103 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3105 int port = bp->port;
3110 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3112 /* change the uni_phy_addr in the nig */
3113 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3115 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3117 /* change the aer mmd */
3118 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3119 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3121 /* config combo IEEE0 control reg for loopback */
3122 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3123 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3126 /* set aer mmd back */
3127 bnx2x_set_aer_mmd(bp);
3130 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3135 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3137 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3138 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3140 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3142 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3147 /* end of PHY/MAC */
3152 * General service functions
3155 /* the slow path queue is odd since completions arrive on the fastpath ring */
/* Post a slow-path entry (ramrod) on the SPQ.  Under spq_lock: fail
 * if the queue is full, fill the producer BD (command, CID, common
 * flag, data hi/lo), advance the producer with wrap-around, and kick
 * the XSTORM producer register.  Completions arrive on the fastpath
 * ring, hence the "odd" comment above.
 */
3156 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3157 u32 data_hi, u32 data_lo, int common)
3159 int port = bp->port;
3162 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3163 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3164 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3165 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3167 #ifdef BNX2X_STOP_ON_ERROR
3168 if (unlikely(bp->panic))
3172 spin_lock(&bp->spq_lock);
3174 if (!bp->spq_left) {
3175 BNX2X_ERR("BUG! SPQ ring full!\n");
3176 spin_unlock(&bp->spq_lock);
3180 /* CID needs port number to be encoded int it */
3181 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3182 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3184 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3186 bp->spq_prod_bd->hdr.type |=
3187 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3189 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3190 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
/* wrap the producer back to the start of the ring */
3194 if (bp->spq_prod_bd == bp->spq_last_bd) {
3195 bp->spq_prod_bd = bp->spq;
3196 bp->spq_prod_idx = 0;
3197 DP(NETIF_MSG_TIMER, "end of spq\n");
3204 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
3207 spin_unlock(&bp->spq_lock);
3211 /* acquire split MCP access lock register */
/* Spin on the MCP access-lock register (GRCBASE_MCP + 0x9c) until
 * bit 31 reads back set, i.e. the lock is ours; report an error on
 * timeout.  NOTE(review): the retry/delay lines and the return
 * statements are absent from this extract.
 */
3212 static int bnx2x_lock_alr(struct bnx2x *bp)
3219 for (j = 0; j < i*10; j++) {
3221 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3222 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3223 if (val & (1L << 31))
3229 if (!(val & (1L << 31))) {
3230 BNX2X_ERR("Cannot acquire nvram interface\n");
3238 /* Release split MCP access lock register */
/* Write back to GRCBASE_MCP + 0x9c to drop the lock taken by
 * bnx2x_lock_alr(). */
3239 static void bnx2x_unlock_alr(struct bnx2x *bp)
3243 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
/* Snapshot the default status block indices written by the chip
 * (attention + C/U/X/T storm indices) into bp, noting which ones
 * changed.  NOTE(review): the lines that accumulate and return the
 * changed-bits result are absent from this extract.
 */
3246 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3248 struct host_def_status_block *def_sb = bp->def_status_blk;
3251 barrier(); /* status block is written to by the chip */
3253 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3254 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3257 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
3258 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
3261 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
3262 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
3265 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
3266 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
3269 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
3270 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
3277 * slow path service functions
/* Handle newly-asserted attention bits: sanity-check against the AEU
 * mask and current attn_state, mask the asserted bits in the AEU,
 * record them in attn_state, service the hard-wired attentions (NIG
 * link change, SW timer, GPIOs, general attentions 1-6), then write
 * the IGU set-register and restore the NIG mask.
 */
3280 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3282 int port = bp->port;
3283 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
3284 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3285 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3286 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3287 NIG_REG_MASK_INTERRUPT_PORT0;
/* an asserted bit that is not currently unmasked, or that is already
 * recorded in attn_state, indicates an IGU inconsistency */
3289 if (~bp->aeu_mask & (asserted & 0xff))
3290 BNX2X_ERR("IGU ERROR\n");
3291 if (bp->attn_state & asserted)
3292 BNX2X_ERR("IGU ERROR\n");
3294 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3295 bp->aeu_mask, asserted);
3296 bp->aeu_mask &= ~(asserted & 0xff);
3297 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
3299 REG_WR(bp, aeu_addr, bp->aeu_mask);
3301 bp->attn_state |= asserted;
3303 if (asserted & ATTN_HARD_WIRED_MASK) {
3304 if (asserted & ATTN_NIG_FOR_FUNC) {
3305 u32 nig_status_port;
3306 u32 nig_int_addr = port ?
3307 NIG_REG_STATUS_INTERRUPT_PORT1 :
3308 NIG_REG_STATUS_INTERRUPT_PORT0;
/* mask NIG interrupts while processing the link change; the saved
 * mask is restored at the bottom of this function */
3310 bp->nig_mask = REG_RD(bp, nig_mask_addr);
3311 REG_WR(bp, nig_mask_addr, 0);
3313 nig_status_port = REG_RD(bp, nig_int_addr);
3314 bnx2x_link_update(bp);
3316 /* handle unicore attn? */
3318 if (asserted & ATTN_SW_TIMER_4_FUNC)
3319 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3321 if (asserted & GPIO_2_FUNC)
3322 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3324 if (asserted & GPIO_3_FUNC)
3325 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3327 if (asserted & GPIO_4_FUNC)
3328 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3331 if (asserted & ATTN_GENERAL_ATTN_1) {
3332 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3333 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3335 if (asserted & ATTN_GENERAL_ATTN_2) {
3336 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3337 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3339 if (asserted & ATTN_GENERAL_ATTN_3) {
3340 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3341 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3344 if (asserted & ATTN_GENERAL_ATTN_4) {
3345 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3346 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3348 if (asserted & ATTN_GENERAL_ATTN_5) {
3349 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3350 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3352 if (asserted & ATTN_GENERAL_ATTN_6) {
3353 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3354 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3358 } /* if hardwired */
3360 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
3361 asserted, BAR_IGU_INTMEM + igu_addr);
3362 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
3364 /* now set back the mask */
3365 if (asserted & ATTN_NIG_FOR_FUNC)
3366 REG_WR(bp, nig_mask_addr, bp->nig_mask);
/* Handle de-asserted attention bits: read the after-invert AEU
 * registers, walk the dynamic attention groups selected by
 * 'deasserted' and service each block's HW attention (MC/MCP asserts,
 * DORQ, CFC, PXP, latched bits, generic HW/parity attentions), then
 * ack the IGU clear-register and restore aeu_mask / attn_state.
 * Runs under the split MCP access lock (bnx2x_lock_alr/unlock_alr)
 * because the MCP or the other port may race on the same event.
 *
 * Fix: error string typo "UNKOWEN HW ASSERT!" corrected to
 * "UNKNOWN HW ASSERT!".
 */
3369 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3371 int port = bp->port;
3373 struct attn_route attn;
3374 struct attn_route group_mask;
3378 /* need to take HW lock because MCP or other port might also
3379 try to handle this event */
3382 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3383 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3384 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3385 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3386 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
3388 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3389 if (deasserted & (1 << index)) {
3390 group_mask = bp->attn_group[index];
3392 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
3393 (unsigned long long)group_mask.sig[0]);
3395 if (attn.sig[3] & group_mask.sig[3] &
3396 EVEREST_GEN_ATTN_IN_USE_MASK) {
3398 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
3400 BNX2X_ERR("MC assert!\n");
3403 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
3405 BNX2X_ERR("MCP assert!\n");
3407 MISC_REG_AEU_GENERAL_ATTN_11, 0);
3408 bnx2x_mc_assert(bp);
3411 BNX2X_ERR("UNKNOWN HW ASSERT!\n");
3415 if (attn.sig[1] & group_mask.sig[1] &
3416 BNX2X_DOORQ_ASSERT) {
3418 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3419 BNX2X_ERR("DB hw attention 0x%x\n", val);
3420 /* DORQ discard attention */
3422 BNX2X_ERR("FATAL error from DORQ\n");
3425 if (attn.sig[2] & group_mask.sig[2] &
3426 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3428 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3429 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3430 /* CFC error attention */
3432 BNX2X_ERR("FATAL error from CFC\n");
3435 if (attn.sig[2] & group_mask.sig[2] &
3436 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3438 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3439 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3440 /* RQ_USDMDP_FIFO_OVERFLOW */
3442 BNX2X_ERR("FATAL error from PXP\n");
3445 if (attn.sig[3] & group_mask.sig[3] &
3446 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3448 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
3450 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
3454 if ((attn.sig[0] & group_mask.sig[0] &
3455 HW_INTERRUT_ASSERT_SET_0) ||
3456 (attn.sig[1] & group_mask.sig[1] &
3457 HW_INTERRUT_ASSERT_SET_1) ||
3458 (attn.sig[2] & group_mask.sig[2] &
3459 HW_INTERRUT_ASSERT_SET_2))
3460 BNX2X_ERR("FATAL HW block attention\n");
3462 if ((attn.sig[0] & group_mask.sig[0] &
3463 HW_PRTY_ASSERT_SET_0) ||
3464 (attn.sig[1] & group_mask.sig[1] &
3465 HW_PRTY_ASSERT_SET_1) ||
3466 (attn.sig[2] & group_mask.sig[2] &
3467 HW_PRTY_ASSERT_SET_2))
3468 BNX2X_ERR("FATAL HW block parity attention\n");
3472 bnx2x_unlock_alr(bp);
3474 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
3477 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
3478 val, BAR_IGU_INTMEM + reg_addr); */
3479 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
3481 if (bp->aeu_mask & (deasserted & 0xff))
3482 BNX2X_ERR("IGU BUG\n");
3483 if (~bp->attn_state & deasserted)
3484 BNX2X_ERR("IGU BUG\n");
3486 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3487 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3489 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
3490 bp->aeu_mask |= (deasserted & 0xff);
3492 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
3493 REG_WR(bp, reg_addr, bp->aeu_mask);
3495 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3496 bp->attn_state &= ~deasserted;
3497 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
/* Dispatch attention handling: compare the chip's attn_bits/attn_ack
 * against the driver's attn_state, derive which bits were newly
 * asserted and which de-asserted, and route each set to the
 * corresponding handler.
 */
3500 static void bnx2x_attn_int(struct bnx2x *bp)
3502 /* read local copy of bits */
3503 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
3504 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
3505 u32 attn_state = bp->attn_state;
3507 /* look for changed bits */
3508 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3509 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3512 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3513 attn_bits, attn_ack, asserted, deasserted);
/* a bit that differs between bits and ack must also differ between
 * bits and our recorded state, otherwise bookkeeping is broken */
3515 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3516 BNX2X_ERR("bad attention state\n");
3518 /* handle bits that were raised */
3520 bnx2x_attn_int_asserted(bp, asserted);
3523 bnx2x_attn_int_deasserted(bp, deasserted);
/* Slow-path workqueue handler (scheduled from the SP MSI-X ISR).
 * Bails out if interrupts are disabled, refreshes the default status
 * block indices, then acks all five default-SB segments back to the
 * IGU.  NOTE(review): lines handling the individual status bits are
 * absent from this extract.
 */
3526 static void bnx2x_sp_task(struct work_struct *work)
3528 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
3531 /* Return here if interrupt is disabled */
3532 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3533 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3537 status = bnx2x_update_dsb_idx(bp);
3539 BNX2X_ERR("spurious slowpath interrupt!\n");
3541 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
3548 /* CStorm events: query_stats, cfc delete ramrods */
3550 bp->stat_pending = 0;
3552 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
3554 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3556 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3558 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3560 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
/* Slow-path MSI-X interrupt handler: disable further IGU interrupts
 * for the default SB and defer the real work to the sp_task
 * workqueue.  NOTE(review): the IRQ_HANDLED/IRQ_NONE return lines are
 * absent from this extract.
 */
3564 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3566 struct net_device *dev = dev_instance;
3567 struct bnx2x *bp = netdev_priv(dev);
3569 /* Return here if interrupt is disabled */
3570 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3571 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3575 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
3577 #ifdef BNX2X_STOP_ON_ERROR
3578 if (unlikely(bp->panic))
3582 schedule_work(&bp->sp_task);
3587 /* end of slow path */
3591 /****************************************************************************
3593 ****************************************************************************/
/* Accumulate the delta of hardware counter 's' (new vs. old snapshot)
 * into driver stat 't'; expects 'estats', 'new' and 'old' in the
 * caller's scope. */
3595 #define UPDATE_STAT(s, t) \
3597 estats->t += new->s - old->s; \
/* sum[hi:lo] += add[hi:lo]
 *
 * 64-bit addition performed on two u32 halves: add the low words
 * first, then propagate a carry of 1 into the high word when the low
 * word wrapped (s_lo < a_lo after the add).
 *
 * Fix: the original expansion
 *     s_hi += a_hi + (s_lo < a_lo) ? 1 : 0;
 * parses as  s_hi += ((a_hi + (s_lo < a_lo)) ? 1 : 0)  because '+'
 * binds tighter than '?:', so the high word was bumped by at most 1
 * instead of by a_hi plus the carry.  The conditional is now fully
 * parenthesized.
 */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
3608 /* difference = minuend - subtrahend */
/* 64-bit subtraction on u32 halves with explicit borrow handling:
 * when the low word underflows, borrow 1 from the high word if
 * possible; an overall negative result clamps d_hi/d_lo (the clamping
 * assignments are on lines absent from this extract).
 */
3609 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3611 if (m_lo < s_lo) { /* underflow */ \
3612 d_hi = m_hi - s_hi; \
3613 if (d_hi > 0) { /* we can 'loan' 1 */ \
3615 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3616 } else { /* m_hi <= s_hi */ \
3620 } else { /* m_lo >= s_lo */ \
3621 if (m_hi < s_hi) { \
3624 } else { /* m_hi >= s_hi */ \
3625 d_hi = m_hi - s_hi; \
3626 d_lo = m_lo - s_lo; \
3631 /* minuend -= subtrahend */
/* In-place 64-bit subtraction: m[hi:lo] -= s[hi:lo], built on
 * DIFF_64 with the minuend doubling as the destination. */
3632 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3634 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
/* Accumulate a 64-bit hardware counter delta into a driver stat:
 * diff = new - old (via DIFF_64), refresh the old snapshot, then add
 * the delta into estats->t via ADD_64.  Expects 'new', 'old',
 * 'estats' and a local 'diff' in the caller's scope. */
3637 #define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
3639 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
3640 diff.lo, new->s_lo, old->s_lo); \
3641 old->s_hi = new->s_hi; \
3642 old->s_lo = new->s_lo; \
3643 ADD_64(estats->t_hi, diff.hi, \
3644 estats->t_lo, diff.lo); \
3647 /* sum[hi:lo] += add */
/* Add a 32-bit value into a split 64-bit accumulator; the carry into
 * the high word is taken when the low word wraps.  (The low-word add
 * itself is on a line absent from this extract.) */
3648 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3651 s_hi += (s_lo < a) ? 1 : 0; \
/* Fold the 32-bit hardware counter new->s into the 64-bit driver stat
 * estats->t via ADD_EXTEND_64. */
3654 #define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
3656 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
/* TSTORM per-client variant: compute the little-endian-aware delta of
 * tclient->s vs. the old snapshot, refresh the snapshot, and fold the
 * delta into the 64-bit driver stat. */
3659 #define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
3661 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3662 old_tclient->s = le32_to_cpu(tclient->s); \
3663 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
3667 * General service functions
/* Combine a hi/lo u32 pair (hiref points at the high word, the low
 * word follows it) into a single value.  On 64-bit builds the full
 * 64-bit value is returned via HILO_U64; the 32-bit fallback branch
 * is on lines absent from this extract.
 */
3670 static inline long bnx2x_hilo(u32 *hiref)
3672 u32 lo = *(hiref + 1);
3673 #if (BITS_PER_LONG == 64)
3676 return HILO_U64(hi, lo);
3683 * Init service functions
/* Build the chain of DMAE commands that collect MAC statistics.
 * First a PCI->GRC command that copies the driver's eth_stats into
 * the firmware mailbox area; then, depending on which MAC is active
 * (BMAC or EMAC), GRC->PCI commands that pull the MAC's TX/RX counter
 * blocks into bnx2x_sp(bp, mac_stats); and finally a command reading
 * the NIG BRB-discard counters, whose completion word (comp_val
 * 0xffffffff into nig.done) marks the whole chain finished.
 * Commands are loaded via the per-port DMAE loader (loader_idx).
 */
3686 static void bnx2x_init_mac_stats(struct bnx2x *bp)
3688 struct dmae_command *dmae;
3689 int port = bp->port;
3690 int loader_idx = port * 8;
3694 bp->executer_idx = 0;
/* opcode for the eth_stats -> firmware-mailbox copy; endianness flag
 * chosen at compile time */
3697 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3698 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3700 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3702 DMAE_CMD_ENDIANITY_DW_SWAP |
3704 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3707 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
3709 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3710 dmae->opcode = opcode;
3711 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
3713 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
3715 dmae->dst_addr_lo = bp->fw_mb >> 2;
3716 dmae->dst_addr_hi = 0;
3717 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
3720 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3721 dmae->comp_addr_hi = 0;
3724 dmae->comp_addr_lo = 0;
3725 dmae->comp_addr_hi = 0;
3731 /* no need to collect statistics in link down */
/* opcode for the MAC-counter GRC->PCI reads below */
3735 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3736 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3737 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3739 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3741 DMAE_CMD_ENDIANITY_DW_SWAP |
3743 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3745 if (bp->phy_flags & PHY_BMAC_FLAG) {
3747 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3748 NIG_REG_INGRESS_BMAC0_MEM);
3750 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3751 BIGMAC_REGISTER_TX_STAT_GTBYT */
3752 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3753 dmae->opcode = opcode;
3754 dmae->src_addr_lo = (mac_addr +
3755 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3756 dmae->src_addr_hi = 0;
3757 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3758 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3759 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3760 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3761 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3762 dmae->comp_addr_hi = 0;
3765 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3766 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3767 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3768 dmae->opcode = opcode;
3769 dmae->src_addr_lo = (mac_addr +
3770 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3771 dmae->src_addr_hi = 0;
3772 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3773 offsetof(struct bmac_stats, rx_gr64));
3774 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3775 offsetof(struct bmac_stats, rx_gr64));
3776 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3777 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3778 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3779 dmae->comp_addr_hi = 0;
3782 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
3784 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3786 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3787 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3788 dmae->opcode = opcode;
3789 dmae->src_addr_lo = (mac_addr +
3790 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3791 dmae->src_addr_hi = 0;
3792 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3793 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3794 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3795 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3796 dmae->comp_addr_hi = 0;
3799 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3800 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3801 dmae->opcode = opcode;
3802 dmae->src_addr_lo = (mac_addr +
3803 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3804 dmae->src_addr_hi = 0;
3805 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3806 offsetof(struct emac_stats,
3807 rx_falsecarriererrors));
3808 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3809 offsetof(struct emac_stats,
3810 rx_falsecarriererrors));
3812 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3813 dmae->comp_addr_hi = 0;
3816 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3817 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3818 dmae->opcode = opcode;
3819 dmae->src_addr_lo = (mac_addr +
3820 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3821 dmae->src_addr_hi = 0;
3822 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3823 offsetof(struct emac_stats,
3825 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3826 offsetof(struct emac_stats,
3828 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3829 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3830 dmae->comp_addr_hi = 0;
/* final command: NIG BRB discard counters; its PCI completion write
 * (comp_val below) signals the end of the whole chain */
3835 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3836 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3837 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3838 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3840 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3842 DMAE_CMD_ENDIANITY_DW_SWAP |
3844 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
3845 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3846 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3847 dmae->src_addr_hi = 0;
3848 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
3849 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
3850 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
3851 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
3852 offsetof(struct nig_stats, done));
3853 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
3854 offsetof(struct nig_stats, done));
3855 dmae->comp_val = 0xffffffff;
/* One-time statistics init for the port: disable the stats state
 * machine, snapshot the current BRB discard count, zero the old
 * BMAC/tclient/netdev counters, then program the X/T/C storms with
 * the stats flags and the DMA address of the firmware stats buffer.
 */
3858 static void bnx2x_init_stats(struct bnx2x *bp)
3860 int port = bp->port;
3862 bp->stats_state = STATS_STATE_DISABLE;
3863 bp->executer_idx = 0;
3865 bp->old_brb_discard = REG_RD(bp,
3866 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3868 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
3869 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3870 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
/* stats collection enabled (1) for X and T storms, disabled (0) for
 * the C storm; each flag is a 64-bit field written as two words */
3872 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
3873 REG_WR(bp, BAR_XSTRORM_INTMEM +
3874 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3876 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
3877 REG_WR(bp, BAR_TSTRORM_INTMEM +
3878 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
3880 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
3881 REG_WR(bp, BAR_CSTRORM_INTMEM +
3882 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
/* tell the X and T storms where the fw_stats buffer lives */
3884 REG_WR(bp, BAR_XSTRORM_INTMEM +
3885 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
3886 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3887 REG_WR(bp, BAR_XSTRORM_INTMEM +
3888 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
3889 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3891 REG_WR(bp, BAR_TSTRORM_INTMEM +
3892 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
3893 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3894 REG_WR(bp, BAR_TSTRORM_INTMEM +
3895 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
3896 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
/* Request a stop of the statistics state machine and busy-wait until
 * it reaches STATS_STATE_DISABLE, logging an error on timeout.
 * NOTE(review): the loop counter/sleep lines are absent from this
 * extract.
 */
3899 static void bnx2x_stop_stats(struct bnx2x *bp)
3902 if (bp->stats_state != STATS_STATE_DISABLE) {
3905 bp->stats_state = STATS_STATE_STOP;
3906 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
3908 while (bp->stats_state != STATS_STATE_DISABLE) {
3910 BNX2X_ERR("timeout waiting for stats stop\n");
3917 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
3921 * Statistics service functions
3924 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
3928 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
3929 struct bmac_stats *old = &bp->old_bmac;
3930 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3935 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
3936 tx_gtbyt.lo, total_bytes_transmitted_lo);
3938 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
3939 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
3940 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
3942 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
3943 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
3944 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
3946 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
3947 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
3948 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
3949 estats->total_unicast_packets_transmitted_lo, sum.lo);
3951 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
3952 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
3953 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
3954 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
3955 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
3956 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
3957 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
3958 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
3959 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
3960 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
3961 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
3963 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
3964 UPDATE_STAT(rx_grund.lo, runt_packets_received);
3965 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
3966 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
3967 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
3968 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
3969 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
3970 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
3972 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
3973 rx_grerb.lo, stat_IfHCInBadOctets_lo);
3974 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
3975 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
3976 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
3977 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
3978 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
3981 static void bnx2x_update_emac_stats(struct bnx2x *bp)
3983 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
3984 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
3986 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
3987 total_bytes_transmitted_lo);
3988 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
3989 total_unicast_packets_transmitted_hi,
3990 total_unicast_packets_transmitted_lo);
3991 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
3992 total_multicast_packets_transmitted_hi,
3993 total_multicast_packets_transmitted_lo);
3994 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
3995 total_broadcast_packets_transmitted_hi,
3996 total_broadcast_packets_transmitted_lo);
3998 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
3999 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4000 estats->single_collision_transmit_frames +=
4001 new->tx_dot3statssinglecollisionframes;
4002 estats->multiple_collision_transmit_frames +=
4003 new->tx_dot3statsmultiplecollisionframes;
4004 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4005 estats->excessive_collision_frames +=
4006 new->tx_dot3statsexcessivecollisions;
4007 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4008 estats->frames_transmitted_65_127_bytes +=
4009 new->tx_etherstatspkts65octetsto127octets;
4010 estats->frames_transmitted_128_255_bytes +=
4011 new->tx_etherstatspkts128octetsto255octets;
4012 estats->frames_transmitted_256_511_bytes +=
4013 new->tx_etherstatspkts256octetsto511octets;
4014 estats->frames_transmitted_512_1023_bytes +=
4015 new->tx_etherstatspkts512octetsto1023octets;
4016 estats->frames_transmitted_1024_1522_bytes +=
4017 new->tx_etherstatspkts1024octetsto1522octet;
4018 estats->frames_transmitted_1523_9022_bytes +=
4019 new->tx_etherstatspktsover1522octets;
4021 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4022 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4023 estats->false_carrier_detections += new->rx_falsecarriererrors;
4024 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4025 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4026 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4027 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4028 estats->control_frames_received += new->rx_maccontrolframesreceived;
4029 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4030 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4032 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4033 stat_IfHCInBadOctets_lo);
4034 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4035 stat_IfHCOutBadOctets_lo);
4036 estats->stat_Dot3statsInternalMacTransmitErrors +=
4037 new->tx_dot3statsinternalmactransmiterrors;
4038 estats->stat_Dot3StatsCarrierSenseErrors +=
4039 new->rx_dot3statscarriersenseerrors;
4040 estats->stat_Dot3StatsDeferredTransmissions +=
4041 new->tx_dot3statsdeferredtransmissions;
4042 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4043 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4046 static int bnx2x_update_storm_stats(struct bnx2x *bp)
4048 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4049 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4050 struct tstorm_per_client_stats *tclient =
4051 &tstats->client_statistics[0];
4052 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4053 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4054 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4055 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4058 /* are DMAE stats valid? */
4059 if (nstats->done != 0xffffffff) {
4060 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4064 /* are storm stats valid? */
4065 if (tstats->done.hi != 0xffffffff) {
4066 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4069 if (xstats->done.hi != 0xffffffff) {
4070 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4074 estats->total_bytes_received_hi =
4075 estats->valid_bytes_received_hi =
4076 le32_to_cpu(tclient->total_rcv_bytes.hi);
4077 estats->total_bytes_received_lo =
4078 estats->valid_bytes_received_lo =
4079 le32_to_cpu(tclient->total_rcv_bytes.lo);
4080 ADD_64(estats->total_bytes_received_hi,
4081 le32_to_cpu(tclient->rcv_error_bytes.hi),
4082 estats->total_bytes_received_lo,
4083 le32_to_cpu(tclient->rcv_error_bytes.lo));
4085 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4086 total_unicast_packets_received_hi,
4087 total_unicast_packets_received_lo);
4088 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4089 total_multicast_packets_received_hi,
4090 total_multicast_packets_received_lo);
4091 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4092 total_broadcast_packets_received_hi,
4093 total_broadcast_packets_received_lo);
4095 estats->frames_received_64_bytes = MAC_STX_NA;
4096 estats->frames_received_65_127_bytes = MAC_STX_NA;
4097 estats->frames_received_128_255_bytes = MAC_STX_NA;
4098 estats->frames_received_256_511_bytes = MAC_STX_NA;
4099 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4100 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4101 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4103 estats->x_total_sent_bytes_hi =
4104 le32_to_cpu(xstats->total_sent_bytes.hi);
4105 estats->x_total_sent_bytes_lo =
4106 le32_to_cpu(xstats->total_sent_bytes.lo);
4107 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4109 estats->t_rcv_unicast_bytes_hi =
4110 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4111 estats->t_rcv_unicast_bytes_lo =
4112 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4113 estats->t_rcv_broadcast_bytes_hi =
4114 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4115 estats->t_rcv_broadcast_bytes_lo =
4116 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4117 estats->t_rcv_multicast_bytes_hi =
4118 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4119 estats->t_rcv_multicast_bytes_lo =
4120 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4121 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4123 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4124 estats->packets_too_big_discard =
4125 le32_to_cpu(tclient->packets_too_big_discard);
4126 estats->jabber_packets_received = estats->packets_too_big_discard +
4127 estats->stat_Dot3statsFramesTooLong;
4128 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4129 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4130 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4131 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4132 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4133 estats->brb_truncate_discard =
4134 le32_to_cpu(tstats->brb_truncate_discard);
4136 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4137 bp->old_brb_discard = nstats->brb_discard;
4139 estats->brb_packet = nstats->brb_packet;
4140 estats->brb_truncate = nstats->brb_truncate;
4141 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4142 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4143 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4144 estats->mng_discard = nstats->mng_discard;
4145 estats->mng_octet_inp = nstats->mng_octet_inp;
4146 estats->mng_octet_out = nstats->mng_octet_out;
4147 estats->mng_packet_inp = nstats->mng_packet_inp;
4148 estats->mng_packet_out = nstats->mng_packet_out;
4149 estats->pbf_octets = nstats->pbf_octets;
4150 estats->pbf_packet = nstats->pbf_packet;
4151 estats->safc_inp = nstats->safc_inp;
4153 xstats->done.hi = 0;
4154 tstats->done.hi = 0;
4160 static void bnx2x_update_net_stats(struct bnx2x *bp)
4162 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4163 struct net_device_stats *nstats = &bp->dev->stats;
4165 nstats->rx_packets =
4166 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4167 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4168 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4170 nstats->tx_packets =
4171 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4172 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4173 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4175 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4178 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4180 nstats->rx_dropped = estats->checksum_discard +
4181 estats->mac_discard;
4182 nstats->tx_dropped = 0;
4185 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4187 nstats->collisions =
4188 estats->single_collision_transmit_frames +
4189 estats->multiple_collision_transmit_frames +
4190 estats->late_collision_frames +
4191 estats->excessive_collision_frames;
4193 nstats->rx_length_errors = estats->runt_packets_received +
4194 estats->jabber_packets_received;
4195 nstats->rx_over_errors = estats->no_buff_discard;
4196 nstats->rx_crc_errors = estats->crc_receive_errors;
4197 nstats->rx_frame_errors = estats->alignment_errors;
4198 nstats->rx_fifo_errors = estats->brb_discard +
4199 estats->brb_truncate_discard;
4200 nstats->rx_missed_errors = estats->xxoverflow_discard;
4202 nstats->rx_errors = nstats->rx_length_errors +
4203 nstats->rx_over_errors +
4204 nstats->rx_crc_errors +
4205 nstats->rx_frame_errors +
4206 nstats->rx_fifo_errors;
4208 nstats->tx_aborted_errors = estats->late_collision_frames +
4209 estats->excessive_collision_frames;
4210 nstats->tx_carrier_errors = estats->false_carrier_detections;
4211 nstats->tx_fifo_errors = 0;
4212 nstats->tx_heartbeat_errors = 0;
4213 nstats->tx_window_errors = 0;
4215 nstats->tx_errors = nstats->tx_aborted_errors +
4216 nstats->tx_carrier_errors;
4218 estats->mac_stx_start = ++estats->mac_stx_end;
4221 static void bnx2x_update_stats(struct bnx2x *bp)
4225 if (!bnx2x_update_storm_stats(bp)) {
4227 if (bp->phy_flags & PHY_BMAC_FLAG) {
4228 bnx2x_update_bmac_stats(bp);
4230 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4231 bnx2x_update_emac_stats(bp);
4233 } else { /* unreached */
4234 BNX2X_ERR("no MAC active\n");
4238 bnx2x_update_net_stats(bp);
4241 if (bp->msglevel & NETIF_MSG_TIMER) {
4242 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4243 struct net_device_stats *nstats = &bp->dev->stats;
4245 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4246 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4248 bnx2x_tx_avail(bp->fp),
4249 *bp->fp->tx_cons_sb, nstats->tx_packets);
4250 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4252 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
4253 *bp->fp->rx_cons_sb, nstats->rx_packets);
4254 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
4255 netif_queue_stopped(bp->dev)? "Xoff" : "Xon",
4256 estats->driver_xoff, estats->brb_discard);
4257 printk(KERN_DEBUG "tstats: checksum_discard %u "
4258 "packets_too_big_discard %u no_buff_discard %u "
4259 "mac_discard %u mac_filter_discard %u "
4260 "xxovrflow_discard %u brb_truncate_discard %u "
4261 "ttl0_discard %u\n",
4262 estats->checksum_discard,
4263 estats->packets_too_big_discard,
4264 estats->no_buff_discard, estats->mac_discard,
4265 estats->mac_filter_discard, estats->xxoverflow_discard,
4266 estats->brb_truncate_discard, estats->ttl0_discard);
4268 for_each_queue(bp, i) {
4269 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4270 bnx2x_fp(bp, i, tx_pkt),
4271 bnx2x_fp(bp, i, rx_pkt),
4272 bnx2x_fp(bp, i, rx_calls));
4276 if (bp->state != BNX2X_STATE_OPEN) {
4277 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
4281 #ifdef BNX2X_STOP_ON_ERROR
4282 if (unlikely(bp->panic))
4287 if (bp->executer_idx) {
4288 struct dmae_command *dmae = &bp->dmae;
4289 int port = bp->port;
4290 int loader_idx = port * 8;
4292 memset(dmae, 0, sizeof(struct dmae_command));
4294 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4295 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4296 DMAE_CMD_DST_RESET |
4298 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4300 DMAE_CMD_ENDIANITY_DW_SWAP |
4302 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4303 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
4304 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
4305 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
4306 sizeof(struct dmae_command) *
4307 (loader_idx + 1)) >> 2;
4308 dmae->dst_addr_hi = 0;
4309 dmae->len = sizeof(struct dmae_command) >> 2;
4310 dmae->len--; /* !!! for A0/1 only */
4311 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
4312 dmae->comp_addr_hi = 0;
4315 bnx2x_post_dmae(bp, dmae, loader_idx);
4318 if (bp->stats_state != STATS_STATE_ENABLE) {
4319 bp->stats_state = STATS_STATE_DISABLE;
4323 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
4324 /* stats ramrod has it's own slot on the spe */
4326 bp->stat_pending = 1;
4330 static void bnx2x_timer(unsigned long data)
4332 struct bnx2x *bp = (struct bnx2x *) data;
4334 if (!netif_running(bp->dev))
4337 if (atomic_read(&bp->intr_sem) != 0)
4338 goto bnx2x_restart_timer;
4341 struct bnx2x_fastpath *fp = &bp->fp[0];
4344 bnx2x_tx_int(fp, 1000);
4345 rc = bnx2x_rx_int(fp, 1000);
4348 if (!nomcp && (bp->bc_ver >= 0x040003)) {
4349 int port = bp->port;
4353 ++bp->fw_drv_pulse_wr_seq;
4354 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4355 /* TBD - add SYSTEM_TIME */
4356 drv_pulse = bp->fw_drv_pulse_wr_seq;
4357 SHMEM_WR(bp, drv_fw_mb[port].drv_pulse_mb, drv_pulse);
4359 mcp_pulse = (SHMEM_RD(bp, drv_fw_mb[port].mcp_pulse_mb) &
4360 MCP_PULSE_SEQ_MASK);
4361 /* The delta between driver pulse and mcp response
4362 * should be 1 (before mcp response) or 0 (after mcp response)
4364 if ((drv_pulse != mcp_pulse) &&
4365 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4366 /* someone lost a heartbeat... */
4367 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4368 drv_pulse, mcp_pulse);
4372 if (bp->stats_state == STATS_STATE_DISABLE)
4373 goto bnx2x_restart_timer;
4375 bnx2x_update_stats(bp);
4377 bnx2x_restart_timer:
4378 mod_timer(&bp->timer, jiffies + bp->current_interval);
4381 /* end of Statistics */
4386 * nic init service functions
4389 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4390 dma_addr_t mapping, int id)
4392 int port = bp->port;
4397 section = ((u64)mapping) + offsetof(struct host_status_block,
4399 sb->u_status_block.status_block_id = id;
4401 REG_WR(bp, BAR_USTRORM_INTMEM +
4402 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
4403 REG_WR(bp, BAR_USTRORM_INTMEM +
4404 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
4407 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4408 REG_WR16(bp, BAR_USTRORM_INTMEM +
4409 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
4412 section = ((u64)mapping) + offsetof(struct host_status_block,
4414 sb->c_status_block.status_block_id = id;
4416 REG_WR(bp, BAR_CSTRORM_INTMEM +
4417 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
4418 REG_WR(bp, BAR_CSTRORM_INTMEM +
4419 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
4422 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4423 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4424 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
4426 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4429 static void bnx2x_init_def_sb(struct bnx2x *bp,
4430 struct host_def_status_block *def_sb,
4431 dma_addr_t mapping, int id)
4433 int port = bp->port;
4434 int index, val, reg_offset;
4438 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4439 atten_status_block);
4440 def_sb->atten_status_block.status_block_id = id;
4442 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4443 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4445 for (index = 0; index < 3; index++) {
4446 bp->attn_group[index].sig[0] = REG_RD(bp,
4447 reg_offset + 0x10*index);
4448 bp->attn_group[index].sig[1] = REG_RD(bp,
4449 reg_offset + 0x4 + 0x10*index);
4450 bp->attn_group[index].sig[2] = REG_RD(bp,
4451 reg_offset + 0x8 + 0x10*index);
4452 bp->attn_group[index].sig[3] = REG_RD(bp,
4453 reg_offset + 0xc + 0x10*index);
4456 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4457 MISC_REG_AEU_MASK_ATTN_FUNC_0));
4459 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4460 HC_REG_ATTN_MSG0_ADDR_L);
4462 REG_WR(bp, reg_offset, U64_LO(section));
4463 REG_WR(bp, reg_offset + 4, U64_HI(section));
4465 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4467 val = REG_RD(bp, reg_offset);
4469 REG_WR(bp, reg_offset, val);
4472 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4473 u_def_status_block);
4474 def_sb->u_def_status_block.status_block_id = id;
4476 REG_WR(bp, BAR_USTRORM_INTMEM +
4477 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4478 REG_WR(bp, BAR_USTRORM_INTMEM +
4479 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4481 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
4484 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4485 REG_WR16(bp, BAR_USTRORM_INTMEM +
4486 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4489 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4490 c_def_status_block);
4491 def_sb->c_def_status_block.status_block_id = id;
4493 REG_WR(bp, BAR_CSTRORM_INTMEM +
4494 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4495 REG_WR(bp, BAR_CSTRORM_INTMEM +
4496 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4498 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
4501 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4502 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4503 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4506 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4507 t_def_status_block);
4508 def_sb->t_def_status_block.status_block_id = id;
4510 REG_WR(bp, BAR_TSTRORM_INTMEM +
4511 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4512 REG_WR(bp, BAR_TSTRORM_INTMEM +
4513 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4515 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
4518 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4519 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4520 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4523 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4524 x_def_status_block);
4525 def_sb->x_def_status_block.status_block_id = id;
4527 REG_WR(bp, BAR_XSTRORM_INTMEM +
4528 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
4529 REG_WR(bp, BAR_XSTRORM_INTMEM +
4530 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
4532 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
4535 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4536 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4537 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
4539 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4542 static void bnx2x_update_coalesce(struct bnx2x *bp)
4544 int port = bp->port;
4547 for_each_queue(bp, i) {
4549 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4550 REG_WR8(bp, BAR_USTRORM_INTMEM +
4551 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
4552 HC_INDEX_U_ETH_RX_CQ_CONS),
4553 bp->rx_ticks_int/12);
4554 REG_WR16(bp, BAR_USTRORM_INTMEM +
4555 USTORM_SB_HC_DISABLE_OFFSET(port, i,
4556 HC_INDEX_U_ETH_RX_CQ_CONS),
4557 bp->rx_ticks_int ? 0 : 1);
4559 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4560 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4561 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
4562 HC_INDEX_C_ETH_TX_CQ_CONS),
4563 bp->tx_ticks_int/12);
4564 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4565 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
4566 HC_INDEX_C_ETH_TX_CQ_CONS),
4567 bp->tx_ticks_int ? 0 : 1);
4571 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4575 int port = bp->port;
4577 bp->rx_buf_use_size = bp->dev->mtu;
4579 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
4580 bp->rx_buf_size = bp->rx_buf_use_size + 64;
4582 for_each_queue(bp, j) {
4583 struct bnx2x_fastpath *fp = &bp->fp[j];
4586 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4588 for (i = 1; i <= NUM_RX_RINGS; i++) {
4589 struct eth_rx_bd *rx_bd;
4591 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4593 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4594 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4596 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4597 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4601 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4602 struct eth_rx_cqe_next_page *nextpg;
4604 nextpg = (struct eth_rx_cqe_next_page *)
4605 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4607 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4608 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4610 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4611 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4614 /* rx completion queue */
4615 fp->rx_comp_cons = ring_prod = 0;
4617 for (i = 0; i < bp->rx_ring_size; i++) {
4618 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4619 BNX2X_ERR("was only able to allocate "
4623 ring_prod = NEXT_RX_IDX(ring_prod);
4624 BUG_TRAP(ring_prod > i);
4627 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
4628 fp->rx_pkt = fp->rx_calls = 0;
4630 /* Warning! this will generate an interrupt (to the TSTORM) */
4631 /* must only be done when chip is initialized */
4632 REG_WR(bp, BAR_TSTRORM_INTMEM +
4633 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
4637 REG_WR(bp, BAR_USTRORM_INTMEM +
4638 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
4639 U64_LO(fp->rx_comp_mapping));
4640 REG_WR(bp, BAR_USTRORM_INTMEM +
4641 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
4642 U64_HI(fp->rx_comp_mapping));
4646 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4650 for_each_queue(bp, j) {
4651 struct bnx2x_fastpath *fp = &bp->fp[j];
4653 for (i = 1; i <= NUM_TX_RINGS; i++) {
4654 struct eth_tx_bd *tx_bd =
4655 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4658 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4659 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4661 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4662 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4665 fp->tx_pkt_prod = 0;
4666 fp->tx_pkt_cons = 0;
4669 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4674 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4676 int port = bp->port;
4678 spin_lock_init(&bp->spq_lock);
4680 bp->spq_left = MAX_SPQ_PENDING;
4681 bp->spq_prod_idx = 0;
4682 bp->dsb_sp_prod_idx = 0;
4683 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4684 bp->spq_prod_bd = bp->spq;
4685 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4687 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
4688 U64_LO(bp->spq_mapping));
4689 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
4690 U64_HI(bp->spq_mapping));
4692 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
4696 static void bnx2x_init_context(struct bnx2x *bp)
4700 for_each_queue(bp, i) {
4701 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4702 struct bnx2x_fastpath *fp = &bp->fp[i];
4704 context->xstorm_st_context.tx_bd_page_base_hi =
4705 U64_HI(fp->tx_desc_mapping);
4706 context->xstorm_st_context.tx_bd_page_base_lo =
4707 U64_LO(fp->tx_desc_mapping);
4708 context->xstorm_st_context.db_data_addr_hi =
4709 U64_HI(fp->tx_prods_mapping);
4710 context->xstorm_st_context.db_data_addr_lo =
4711 U64_LO(fp->tx_prods_mapping);
4713 context->ustorm_st_context.rx_bd_page_base_hi =
4714 U64_HI(fp->rx_desc_mapping);
4715 context->ustorm_st_context.rx_bd_page_base_lo =
4716 U64_LO(fp->rx_desc_mapping);
4717 context->ustorm_st_context.status_block_id = i;
4718 context->ustorm_st_context.sb_index_number =
4719 HC_INDEX_U_ETH_RX_CQ_CONS;
4720 context->ustorm_st_context.rcq_base_address_hi =
4721 U64_HI(fp->rx_comp_mapping);
4722 context->ustorm_st_context.rcq_base_address_lo =
4723 U64_LO(fp->rx_comp_mapping);
4724 context->ustorm_st_context.flags =
4725 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
4726 context->ustorm_st_context.mc_alignment_size = 64;
4727 context->ustorm_st_context.num_rss = bp->num_queues;
4729 context->cstorm_st_context.sb_index_number =
4730 HC_INDEX_C_ETH_TX_CQ_CONS;
4731 context->cstorm_st_context.status_block_id = i;
4733 context->xstorm_ag_context.cdu_reserved =
4734 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4735 CDU_REGION_NUMBER_XCM_AG,
4736 ETH_CONNECTION_TYPE);
4737 context->ustorm_ag_context.cdu_usage =
4738 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4739 CDU_REGION_NUMBER_UCM_AG,
4740 ETH_CONNECTION_TYPE);
4744 static void bnx2x_init_ind_table(struct bnx2x *bp)
4746 int port = bp->port;
4752 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4753 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
4754 i % bp->num_queues);
4756 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4759 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4761 int mode = bp->rx_mode;
4762 int port = bp->port;
4763 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4766 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4769 case BNX2X_RX_MODE_NONE: /* no Rx */
4770 tstorm_mac_filter.ucast_drop_all = 1;
4771 tstorm_mac_filter.mcast_drop_all = 1;
4772 tstorm_mac_filter.bcast_drop_all = 1;
4774 case BNX2X_RX_MODE_NORMAL:
4775 tstorm_mac_filter.bcast_accept_all = 1;
4777 case BNX2X_RX_MODE_ALLMULTI:
4778 tstorm_mac_filter.mcast_accept_all = 1;
4779 tstorm_mac_filter.bcast_accept_all = 1;
4781 case BNX2X_RX_MODE_PROMISC:
4782 tstorm_mac_filter.ucast_accept_all = 1;
4783 tstorm_mac_filter.mcast_accept_all = 1;
4784 tstorm_mac_filter.bcast_accept_all = 1;
4787 BNX2X_ERR("bad rx mode (%d)\n", mode);
4790 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4791 REG_WR(bp, BAR_TSTRORM_INTMEM +
4792 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
4793 ((u32 *)&tstorm_mac_filter)[i]);
4795 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4796 ((u32 *)&tstorm_mac_filter)[i]); */
4800 static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
4803 int mode = bp->rx_mode;
4805 int port = bp->port;
4806 struct tstorm_eth_client_config tstorm_client = {0};
4808 tstorm_client.mtu = bp->dev->mtu;
4809 tstorm_client.statistics_counter_id = 0;
4810 tstorm_client.config_flags =
4811 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4813 if (mode && bp->vlgrp) {
4814 tstorm_client.config_flags |=
4815 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4816 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4819 tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
4820 TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
4821 TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
4822 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
4824 REG_WR(bp, BAR_TSTRORM_INTMEM +
4825 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
4826 ((u32 *)&tstorm_client)[0]);
4827 REG_WR(bp, BAR_TSTRORM_INTMEM +
4828 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
4829 ((u32 *)&tstorm_client)[1]);
4831 /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
4832 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
4835 static void bnx2x_init_internal(struct bnx2x *bp)
4837 int port = bp->port;
4838 struct tstorm_eth_function_common_config tstorm_config = {0};
4839 struct stats_indication_flags stats_flags = {0};
4843 tstorm_config.config_flags = MULTI_FLAGS;
4844 tstorm_config.rss_result_mask = MULTI_MASK;
4847 REG_WR(bp, BAR_TSTRORM_INTMEM +
4848 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
4849 (*(u32 *)&tstorm_config));
4851 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
4852 (*(u32 *)&tstorm_config)); */
4854 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4855 bnx2x_set_storm_rx_mode(bp);
4857 for_each_queue(bp, i)
4858 bnx2x_set_client_config(bp, i);
4861 stats_flags.collect_eth = cpu_to_le32(1);
4863 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4864 ((u32 *)&stats_flags)[0]);
4865 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4866 ((u32 *)&stats_flags)[1]);
4868 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4869 ((u32 *)&stats_flags)[0]);
4870 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4871 ((u32 *)&stats_flags)[1]);
4873 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4874 ((u32 *)&stats_flags)[0]);
4875 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4876 ((u32 *)&stats_flags)[1]);
4878 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
4879 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
4882 static void bnx2x_nic_init(struct bnx2x *bp)
4886 for_each_queue(bp, i) {
4887 struct bnx2x_fastpath *fp = &bp->fp[i];
4889 fp->state = BNX2X_FP_STATE_CLOSED;
4890 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
4891 bp, fp->status_blk, i);
4893 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
4896 bnx2x_init_def_sb(bp, bp->def_status_blk,
4897 bp->def_status_blk_mapping, 0x10);
4898 bnx2x_update_coalesce(bp);
4899 bnx2x_init_rx_rings(bp);
4900 bnx2x_init_tx_ring(bp);
4901 bnx2x_init_sp_ring(bp);
4902 bnx2x_init_context(bp);
4903 bnx2x_init_internal(bp);
4904 bnx2x_init_stats(bp);
4905 bnx2x_init_ind_table(bp);
4906 bnx2x_enable_int(bp);
4910 /* end of nic init */
4913 * gzip service functions
/* Allocate the resources needed to decompress firmware images:
 * a DMA-coherent output buffer (FW_BUF_SIZE), a zlib stream struct
 * and its inflate workspace.  On failure the error path below frees
 * whatever was allocated and logs the failure.
 * NOTE(review): the gotos/labels between allocations are elided in
 * this view — the error unwinding order cannot be fully confirmed here.
 */
4916 static int bnx2x_gunzip_init(struct bnx2x *bp)
/* DMA-coherent buffer that receives the uncompressed firmware. */
4918 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4919 &bp->gunzip_mapping);
4920 if (bp->gunzip_buf == NULL)
4923 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4924 if (bp->strm == NULL)
/* zlib needs a caller-provided workspace for inflate state. */
4927 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4929 if (bp->strm->workspace == NULL)
/* --- error unwinding: release the DMA buffer and report --- */
4939 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4940 bp->gunzip_mapping);
4941 bp->gunzip_buf = NULL;
4944 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4945 " uncompression\n", bp->dev->name);
/* Release everything bnx2x_gunzip_init() allocated: the zlib inflate
 * workspace and the DMA-coherent firmware buffer.  gunzip_buf is
 * NULLed so a second call is harmless. */
4949 static void bnx2x_gunzip_end(struct bnx2x *bp)
4951 kfree(bp->strm->workspace);
4956 if (bp->gunzip_buf) {
4957 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4958 bp->gunzip_mapping);
4959 bp->gunzip_buf = NULL;
/* Decompress a gzip-wrapped firmware blob of @len bytes at @zbuf into
 * bp->gunzip_buf.  Validates the gzip magic, skips an optional FNAME
 * field, then runs raw-deflate inflate (-MAX_WBITS = no zlib header).
 * On success bp->gunzip_outlen holds the output length in 32-bit words.
 * NOTE(review): several lines (n init, error returns) are elided here.
 */
4963 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4967 /* check gzip header */
/* 0x1f 0x8b is the gzip magic; byte 2 must be the deflate method. */
4968 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
/* FLG.FNAME set: skip the NUL-terminated original file name. */
4975 if (zbuf[3] & FNAME)
4976 while ((zbuf[n++] != 0) && (n < len));
4978 bp->strm->next_in = zbuf + n;
4979 bp->strm->avail_in = len - n;
4980 bp->strm->next_out = bp->gunzip_buf;
4981 bp->strm->avail_out = FW_BUF_SIZE;
/* Negative windowBits => raw deflate stream (header already skipped). */
4983 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4987 rc = zlib_inflate(bp->strm, Z_FINISH);
4988 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4989 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4990 bp->dev->name, bp->strm->msg);
4992 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
/* Firmware images must be dword aligned; outlen is stored in dwords. */
4993 if (bp->gunzip_outlen & 0x3)
4994 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4995 " gunzip_outlen (%d) not aligned\n",
4996 bp->dev->name, bp->gunzip_outlen);
4997 bp->gunzip_outlen >>= 2;
4999 zlib_inflateEnd(bp->strm);
5001 if (rc == Z_STREAM_END)
5007 /* nic load/unload */
5010 * general service functions
5013 /* send a NIG loopback debug packet */
/* Inject a minimal debug packet into the NIG loopback FIFO.
 * Two 3-dword writes: first carries SOP (0x20), second carries a
 * non-IP payload word and EOP (0x10, eop_bvalid = 0).  Each write
 * has a DMAE path and an indirect-register fallback — the #ifdef
 * selecting between them is elided in this view. */
5014 static void bnx2x_lb_pckt(struct bnx2x *bp)
5020 /* Ethernet source and destination addresses */
5022 wb_write[0] = 0x55555555;
5023 wb_write[1] = 0x55555555;
5024 wb_write[2] = 0x20; /* SOP */
5025 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
/* Fallback path: same three dwords via indirect writes. */
5027 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5028 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5030 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5033 /* NON-IP protocol */
5035 wb_write[0] = 0x09000000;
5036 wb_write[1] = 0x55555555;
5037 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5038 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5040 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5041 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5042 /* EOP, eop_bvalid = 0 */
5043 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5047 /* some of the internal memories
5048 * are not directly readable from the driver
5049 * to test them we send debug packets
/* Self-test of internal memories that the driver cannot read directly.
 * Strategy: isolate the parser (PRS) from its neighbors, send debug
 * loopback packets (bnx2x_lb_pckt) and poll NIG/PRS packet counters to
 * confirm the packets traversed the internal memories; BRB/PRS/NIG are
 * reset between phases.  `factor` scales poll budgets per chip rev
 * (switch below; cases elided in this view).  Returns non-zero on
 * timeout/failure — error returns are elided here. */
5051 static int bnx2x_int_mem_test(struct bnx2x *bp)
5057 switch (CHIP_REV(bp)) {
5069 DP(NETIF_MSG_HW, "start part1\n");
5071 /* Disable inputs of parser neighbor blocks */
5072 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5073 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5074 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5075 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5077 /* Write 0 to parser credits for CFC search request */
5078 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5080 /* send Ethernet packet */
5083 /* TODO do i reset NIG statistic? */
5084 /* Wait until NIG register shows 1 packet of size 0x10 */
5085 count = 1000 * factor;
5087 #ifdef BNX2X_DMAE_RD
5088 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5089 val = *bnx2x_sp(bp, wb_data[0]);
5091 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5092 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5101 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5105 /* Wait until PRS register shows 1 packet */
5106 count = 1000 * factor;
5108 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5117 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5121 /* Reset and init BRB, PRS */
5122 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5124 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5126 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5127 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
/* --- phase 2: same setup, 10 packets, stricter counter checks --- */
5129 DP(NETIF_MSG_HW, "part2\n");
5131 /* Disable inputs of parser neighbor blocks */
5132 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5133 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5134 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5135 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5137 /* Write 0 to parser credits for CFC search request */
5138 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5140 /* send 10 Ethernet packets */
5141 for (i = 0; i < 10; i++)
5144 /* Wait until NIG register shows 10 + 1
5145 packets of size 11*0x10 = 0xb0 */
5146 count = 1000 * factor;
5148 #ifdef BNX2X_DMAE_RD
5149 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5150 val = *bnx2x_sp(bp, wb_data[0]);
5152 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5153 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5162 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5166 /* Wait until PRS register shows 2 packets */
5167 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5169 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5171 /* Write 1 to parser credits for CFC search request */
5172 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5174 /* Wait until PRS register shows 3 packets */
5175 msleep(10 * factor);
5176 /* Wait until NIG register shows 1 packet of size 0x10 */
5177 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5179 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5181 /* clear NIG EOP FIFO */
5182 for (i = 0; i < 11; i++)
5183 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5184 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5186 BNX2X_ERR("clear of NIG failed\n");
5190 /* Reset and init BRB, PRS, NIG */
5191 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5195 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5196 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5199 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5202 /* Enable inputs of parser neighbor blocks */
5203 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5204 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5205 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5206 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
5208 DP(NETIF_MSG_HW, "done\n");
/* Unmask attention (error) interrupts for all HW blocks: writing 0 to
 * a *_INT_MASK register enables every attention bit of that block.
 * Non-zero writes below deliberately keep specific bits masked
 * (PXP2 0x480000, PBF 0x18); commented-out lines are blocks left
 * intentionally masked. */
5213 static void enable_blocks_attention(struct bnx2x *bp)
5215 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5216 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5217 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5218 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5219 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5220 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5221 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5222 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5223 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5224 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5225 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5226 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5227 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5228 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5229 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5230 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5231 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5232 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5233 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5234 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5235 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5236 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
/* 0x480000: leave two PXP2 attention bits masked — meaning not
   documented here; TODO confirm against register spec. */
5237 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
5238 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5239 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5240 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5241 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5242 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5243 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5244 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5245 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5246 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
/* Bring up the chip for this PCI function (port 0 or 1).
 * If bit 0 of @mode is set, the COMMON (whole-chip) init is done first:
 * global reset, PXP/PXP2, endianness config, all storm/SEM blocks,
 * searcher, CDU/CFC, NIG, plus an optional internal-memory self test.
 * Then the per-port init runs: ILT/on-chip address tables, per-port
 * block init, PBF no-pause config, searcher T2 table, link reset, and
 * finally the MCP pulse sequence is read.  Returns 0 on success.
 * NOTE(review): a very large number of interior lines (waits, gotos,
 * error returns, #ifdef alternatives) are elided in this view — the
 * comments below describe only the visible statements. */
5249 static int bnx2x_function_init(struct bnx2x *bp, int mode)
5251 int func = bp->port;
5252 int port = func ? PORT1 : PORT0;
5258 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
5259 if ((func != 0) && (func != 1)) {
5260 BNX2X_ERR("BAD function number (%d)\n", func);
5264 bnx2x_gunzip_init(bp);
5266 if (mode & 0x1) { /* init common */
5267 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
/* Global hard reset of all blocks before re-initializing them. */
5269 REG_WR(bp, MISC_REG_RESET_REG_1, 0xffffffff);
5270 REG_WR(bp, MISC_REG_RESET_REG_2, 0xfffc);
5271 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5273 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5275 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5277 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5278 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5282 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5283 /* enable HW interrupt from PXP on USDM
5284 overflow bit 16 on INT_MASK_0 */
5285 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
/* Big-endian host: configure PXP2 request/read swap modes.
   Presumably inside an #ifdef __BIG_ENDIAN — elided here. */
5289 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5290 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5291 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5292 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5293 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5294 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5296 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5297 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5298 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5299 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5300 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5305 REG_WR(bp, PRS_REG_NIC_MODE, 1);
/* ILT page sizes for the PXP2 request queue clients. */
5308 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
5310 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5311 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5312 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5315 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5317 /* let the HW do it's magic ... */
5320 (can be moved up if we want to use the DMAE) */
5321 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5323 BNX2X_ERR("PXP2 CFG failed\n");
5327 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5329 BNX2X_ERR("PXP2 RD_INIT failed\n");
5333 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5334 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
/* Clear the first 8 dwords of TSEM program RAM. */
5336 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5338 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5339 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5340 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5341 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
/* Drain SEM passive buffers; DMAE path or plain reads. */
5343 #ifdef BNX2X_DMAE_RD
5344 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5345 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5346 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5347 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5349 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
5350 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
5351 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
5352 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
5353 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
5354 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
5355 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
5356 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
5357 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
5358 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
5359 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
5360 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
5362 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5363 /* soft reset pulse */
5364 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5365 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5368 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5370 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5371 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
5372 if (CHIP_REV(bp) == CHIP_REV_Ax) {
5373 /* enable hw interrupt from doorbell Q */
5374 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5377 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5379 if (CHIP_REV_IS_SLOW(bp)) {
5380 /* fix for emulation and FPGA for no pause */
5381 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5382 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5383 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5384 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5387 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5389 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5390 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5391 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5392 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
/* Zero all four storms' internal memories. */
5394 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5395 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5396 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5397 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
5399 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5400 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5401 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5402 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5405 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5407 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5410 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5411 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5412 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
/* Searcher: program RSS key registers under soft reset. */
5414 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5415 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5416 REG_WR(bp, i, 0xc0cac01a);
5417 /* TODO: replace with something meaningful */
5419 /* SRCH COMMON comes here */
5420 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5422 if (sizeof(union cdu_context) != 1024) {
5423 /* we currently assume that a context is 1024 bytes */
5424 printk(KERN_ALERT PFX "please adjust the size of"
5425 " cdu_context(%ld)\n",
5426 (long)sizeof(union cdu_context));
/* CDU global params: (4 << 24) | 1024 — field meanings per CDU spec;
   TODO confirm against register documentation. */
5428 val = (4 << 24) + (0 << 12) + 1024;
5429 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5430 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5432 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5433 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5435 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5436 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
5437 MISC_AEU_COMMON_END);
5438 /* RXPCS COMMON comes here */
5439 /* EMAC0 COMMON comes here */
5440 /* EMAC1 COMMON comes here */
5441 /* DBU COMMON comes here */
5442 /* DBG COMMON comes here */
5443 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5445 if (CHIP_REV_IS_SLOW(bp))
5448 /* finish CFC init */
5449 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
5451 BNX2X_ERR("CFC LL_INIT failed\n");
5455 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
5457 BNX2X_ERR("CFC AC_INIT failed\n");
5461 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
5463 BNX2X_ERR("CFC CAM_INIT failed\n");
5467 REG_WR(bp, CFC_REG_DEBUG0, 0);
5469 /* read NIG statistic
5470 to see if this is our first up since powerup */
5471 #ifdef BNX2X_DMAE_RD
5472 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5473 val = *bnx2x_sp(bp, wb_data[0]);
5475 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5476 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5478 /* do internal memory self test */
5479 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5480 BNX2X_ERR("internal mem selftest failed\n");
5484 /* clear PXP2 attentions */
5485 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
5487 enable_blocks_attention(bp);
5488 /* enable_blocks_parity(bp); */
5490 } /* end of common init */
5494 /* the phys address is shifted right 12 bits and has an added
5495 1=valid bit added to the 53rd bit
5496 then since this is a wide register(TM)
5497 we split it into two 32 bit writes
5499 #define RQ_ONCHIP_AT_PORT_SIZE 384
5500 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5501 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5502 #define PXP_ONE_ILT(x) ((x << 10) | x)
/* ---- per-port (per-function) init starts here ---- */
5504 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
5506 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
5508 /* Port PXP comes here */
5509 /* Port PXP2 comes here */
/* CDU ILT entry for this port's context page. */
5514 i = func * RQ_ONCHIP_AT_PORT_SIZE;
5516 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
5517 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
5518 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5520 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
5521 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
5522 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
5523 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
5525 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
/* Timers block ILT entry. */
5531 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5532 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5533 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5534 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
/* QM ILT entry. */
5539 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5540 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5541 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5542 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
/* Searcher T1 table ILT entry. */
5547 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5548 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5549 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5550 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5553 /* Port TCM comes here */
5554 /* Port UCM comes here */
5555 /* Port CCM comes here */
5556 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
5557 func ? XCM_PORT1_END : XCM_PORT0_END);
/* QM: program base addresses and clear pointer table for this
   port's 32 queue pages (1024 * 4 bytes each). */
5563 for (i = 0; i < 32; i++) {
5564 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
5566 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
5568 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
5569 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
5572 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
5574 /* Port QM comes here */
5577 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5578 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5580 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5581 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5583 /* Port DQ comes here */
5584 /* Port BRB1 comes here */
5585 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
5586 func ? PRS_PORT1_END : PRS_PORT0_END);
5587 /* Port TSDM comes here */
5588 /* Port CSDM comes here */
5589 /* Port USDM comes here */
5590 /* Port XSDM comes here */
5591 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
5592 func ? TSEM_PORT1_END : TSEM_PORT0_END);
5593 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
5594 func ? USEM_PORT1_END : USEM_PORT0_END);
5595 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
5596 func ? CSEM_PORT1_END : CSEM_PORT0_END);
5597 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
5598 func ? XSEM_PORT1_END : XSEM_PORT0_END);
5599 /* Port UPB comes here */
5600 /* Port XSDM comes here */
5601 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
5602 func ? PBF_PORT1_END : PBF_PORT0_END);
5604 /* configure PBF to work without PAUSE mtu 9000 */
5605 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
5607 /* update threshold */
5608 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
5609 /* update init credit */
5610 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
/* Pulse PBF init for this port. */
5613 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
5615 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
5618 /* tell the searcher where the T2 table is */
5619 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5621 wb_write[0] = U64_LO(bp->t2_mapping);
5622 wb_write[1] = U64_HI(bp->t2_mapping);
5623 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5624 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5625 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5626 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5628 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5629 /* Port SRCH comes here */
5631 /* Port CDU comes here */
5632 /* Port CFC comes here */
5633 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
5634 func ? HC_PORT1_END : HC_PORT0_END);
5635 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
5636 MISC_AEU_PORT0_START,
5637 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5638 /* Port PXPCS comes here */
5639 /* Port EMAC0 comes here */
5640 /* Port EMAC1 comes here */
5641 /* Port DBU comes here */
5642 /* Port DBG comes here */
5643 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
5644 func ? NIG_PORT1_END : NIG_PORT0_END);
5645 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
5646 /* Port MCP comes here */
5647 /* Port DMAE comes here */
5649 bnx2x_link_reset(bp);
5651 /* Reset PCIE errors for debug */
5652 REG_WR(bp, 0x2114, 0xffffffff);
5653 REG_WR(bp, 0x2120, 0xffffffff);
5654 REG_WR(bp, 0x2814, 0xffffffff);
5656 /* !!! move to init_values.h */
5657 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5658 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5659 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5660 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
5662 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
5663 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
5664 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5665 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
5667 bnx2x_gunzip_end(bp);
/* Record the current FW pulse sequence and mailbox param from shmem
   so the periodic driver pulse stays in sync with the MCP. */
5672 bp->fw_drv_pulse_wr_seq =
5673 (SHMEM_RD(bp, drv_fw_mb[port].drv_pulse_mb) &
5674 DRV_PULSE_SEQ_MASK);
5675 bp->fw_mb = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_param);
5676 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
5677 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
5685 /* send the MCP a request, block until there is a reply */
/* Post @command to the MCP mailbox tagged with an incrementing sequence
 * number, wait, and read back the response.  Returns the response code
 * (FW_MSG_CODE_MASK bits) if the reply's sequence matches, otherwise
 * logs an error.  The non-matching return value is elided in this view.
 * NOTE(review): caller context must tolerate msleep (sleeps 100ms). */
5686 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5689 u32 seq = ++bp->fw_seq;
5690 int port = bp->port;
5692 SHMEM_WR(bp, drv_fw_mb[port].drv_mb_header, command|seq);
5693 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", command|seq);
5695 /* let the FW do it's magic ... */
5696 msleep(100); /* TBD */
5698 if (CHIP_REV_IS_SLOW(bp))
5701 rc = SHMEM_RD(bp, drv_fw_mb[port].fw_mb_header);
5703 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5705 /* is this a reply to our command? */
5706 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5707 rc &= FW_MSG_CODE_MASK;
5710 BNX2X_ERR("FW failed to respond!\n");
/* Free all memory allocated by bnx2x_alloc_mem(): per-queue status
 * blocks and rings (DMA-coherent via pci_free_consistent, host rings
 * via the BNX2X_FREE wrapper), then the default status block, slowpath
 * area, searcher/timers/QM tables and the slow-path ring.  The macro
 * bodies are partially elided in this view (NULL-check/NULL-reset
 * lines missing). */
5717 static void bnx2x_free_mem(struct bnx2x *bp)
/* Free a DMA-coherent region and (presumably) NULL the pointer. */
5720 #define BNX2X_PCI_FREE(x, y, size) \
5723 pci_free_consistent(bp->pdev, size, x, y); \
/* Free a host (vmalloc'd) region. */
5729 #define BNX2X_FREE(x) \
5740 for_each_queue(bp, i) {
5743 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5744 bnx2x_fp(bp, i, status_blk_mapping),
5745 sizeof(struct host_status_block) +
5746 sizeof(struct eth_tx_db_data));
5748 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5749 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5750 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5751 bnx2x_fp(bp, i, tx_desc_mapping),
5752 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5754 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5755 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5756 bnx2x_fp(bp, i, rx_desc_mapping),
5757 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5759 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5760 bnx2x_fp(bp, i, rx_comp_mapping),
5761 sizeof(struct eth_fast_path_rx_cqe) *
5767 /* end of fastpath */
5769 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5770 (sizeof(struct host_def_status_block)));
5772 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5773 (sizeof(struct bnx2x_slowpath)));
/* Sizes mirror the allocations in bnx2x_alloc_mem(). */
5776 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5778 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5779 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5781 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
5783 #undef BNX2X_PCI_FREE
/* Allocate all driver memory: the fastpath array, per-queue status
 * blocks + tx/rx/completion rings, the default status block, slowpath
 * area, searcher T1/T2 tables, timers block, QM queues and the
 * slow-path ring.  Both helper macros zero what they allocate and
 * jump to alloc_mem_err (elided) on failure, where bnx2x_free_mem()
 * presumably unwinds. */
5787 static int bnx2x_alloc_mem(struct bnx2x *bp)
/* DMA-coherent alloc, zeroed, goto-on-failure. */
5790 #define BNX2X_PCI_ALLOC(x, y, size) \
5792 x = pci_alloc_consistent(bp->pdev, size, y); \
5794 goto alloc_mem_err; \
5795 memset(x, 0, size); \
/* Host vmalloc, zeroed, goto-on-failure. */
5798 #define BNX2X_ALLOC(x, size) \
5800 x = vmalloc(size); \
5802 goto alloc_mem_err; \
5803 memset(x, 0, size); \
5809 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
5811 for_each_queue(bp, i) {
5812 bnx2x_fp(bp, i, bp) = bp;
/* Status block and the tx producers area share one DMA allocation;
   hw_tx_prods lives immediately after the status block. */
5815 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5816 &bnx2x_fp(bp, i, status_blk_mapping),
5817 sizeof(struct host_status_block) +
5818 sizeof(struct eth_tx_db_data));
5820 bnx2x_fp(bp, i, hw_tx_prods) =
5821 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5823 bnx2x_fp(bp, i, tx_prods_mapping) =
5824 bnx2x_fp(bp, i, status_blk_mapping) +
5825 sizeof(struct host_status_block);
5827 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5828 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5829 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5830 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5831 &bnx2x_fp(bp, i, tx_desc_mapping),
5832 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5834 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5835 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5836 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5837 &bnx2x_fp(bp, i, rx_desc_mapping),
5838 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5840 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5841 &bnx2x_fp(bp, i, rx_comp_mapping),
5842 sizeof(struct eth_fast_path_rx_cqe) *
5846 /* end of fastpath */
5848 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5849 sizeof(struct host_def_status_block));
5851 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5852 sizeof(struct bnx2x_slowpath));
5855 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
/* Clear searcher T1 line markers every 64-byte line.
   NOTE(review): offset `+ 3` looks suspicious next to `+ 56`
   (possibly meant to be another aligned offset) — verify upstream. */
5858 for (i = 0; i < 64*1024; i += 64) {
5859 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5860 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5863 /* allocate searcher T2 table
5864 we allocate 1/4 of alloc num for T2
5865 (which is not entered into the ILT) */
5866 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
/* Link each 64-byte T2 line to the next (free-list chain)... */
5869 for (i = 0; i < 16*1024; i += 64)
5870 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5872 /* now fixup the last line in the block to point to the next block */
5873 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5875 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5876 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5878 /* QM queues (128*MAX_CONN) */
5879 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5882 /* Slow path ring */
5883 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5891 #undef BNX2X_PCI_ALLOC
/* Walk every queue's tx ring and release all in-flight tx packets
 * (bnx2x_free_tx_pkt unmaps DMA and frees the skb), advancing the
 * software consumer until it meets the producer. */
5895 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5899 for_each_queue(bp, i) {
5900 struct bnx2x_fastpath *fp = &bp->fp[i];
5902 u16 bd_cons = fp->tx_bd_cons;
5903 u16 sw_prod = fp->tx_pkt_prod;
5904 u16 sw_cons = fp->tx_pkt_cons;
5906 BUG_TRAP(fp->tx_buf_ring != NULL);
5908 while (sw_cons != sw_prod) {
5909 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
/* Release every posted rx buffer on every queue: unmap its DMA
 * mapping and (in elided lines) free the skb and clear the slot.
 * Empty slots (skb == NULL) are presumably skipped — check elided. */
5915 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5919 for_each_queue(bp, j) {
5920 struct bnx2x_fastpath *fp = &bp->fp[j];
5922 BUG_TRAP(fp->rx_buf_ring != NULL);
5924 for (i = 0; i < NUM_RX_BD; i++) {
5925 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5926 struct sk_buff *skb = rx_buf->skb;
5931 pci_unmap_single(bp->pdev,
5932 pci_unmap_addr(rx_buf, mapping),
5933 bp->rx_buf_use_size,
5934 PCI_DMA_FROMDEVICE);
/* Convenience wrapper: release all tx then all rx skbs. */
5942 static void bnx2x_free_skbs(struct bnx2x *bp)
5944 bnx2x_free_tx_skbs(bp);
5945 bnx2x_free_rx_skbs(bp);
/* Release MSI-X vectors: vector 0 is the slow-path interrupt (dev as
 * cookie), vectors 1..n belong to fast-path queues (fp as cookie).
 * A queue's vector is only freed if its state is not already CLOSED;
 * the state is reset to CLOSED after freeing. */
5948 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5952 free_irq(bp->msix_table[0].vector, bp->dev);
5953 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5954 bp->msix_table[0].vector);
5956 for_each_queue(bp, i) {
5957 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
5958 "state(%x)\n", i, bp->msix_table[i + 1].vector,
5959 bnx2x_fp(bp, i, state));
5961 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
5963 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
5964 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
5967 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
/* Tear down interrupts: MSI-X path frees all vectors and disables
 * MSI-X; otherwise (else branch elided) the single shared INTx irq
 * is freed. */
5973 static void bnx2x_free_irq(struct bnx2x *bp)
5976 if (bp->flags & USING_MSIX_FLAG) {
5978 bnx2x_free_msix_irqs(bp);
5979 pci_disable_msix(bp->pdev);
5981 bp->flags &= ~USING_MSIX_FLAG;
5984 free_irq(bp->pdev->irq, bp->dev);
/* Try to enable num_queues + 1 MSI-X vectors (entry 0 = slow path,
 * entries 1..n = fast-path queues).  Sets USING_MSIX_FLAG on success;
 * the failure return value is elided in this view. */
5987 static int bnx2x_enable_msix(struct bnx2x *bp)
5992 bp->msix_table[0].entry = 0;
5993 for_each_queue(bp, i)
5994 bp->msix_table[i + 1].entry = i + 1;
5996 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
5997 bp->num_queues + 1)){
5998 BNX2X_ERR("failed to enable msix\n");
6003 bp->flags |= USING_MSIX_FLAG;
/* Request handlers for all enabled MSI-X vectors: the slow-path
 * handler on vector 0, then one fast-path handler per queue.  On any
 * fast-path failure all previously requested vectors are released via
 * bnx2x_free_msix_irqs().  Queue state becomes FP_STATE_IRQ on success. */
6010 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6015 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6017 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6018 bp->dev->name, bp->dev);
6021 BNX2X_ERR("request sp irq failed\n");
6025 for_each_queue(bp, i) {
6026 rc = request_irq(bp->msix_table[i + 1].vector,
6027 bnx2x_msix_fp_int, 0,
6028 bp->dev->name, &bp->fp[i]);
6031 BNX2X_ERR("request fp #%d irq failed\n", i);
6032 bnx2x_free_msix_irqs(bp);
6036 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
/* Legacy INTx path: request the single shared PCI irq and mark
 * queue 0 as having its irq.  Returns request_irq()'s result. */
6044 static int bnx2x_req_irq(struct bnx2x *bp)
6047 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6048 IRQF_SHARED, bp->dev->name, bp->dev);
6050 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6057 * Init service functions
/* Program the device MAC into the CAM via a slow-path ramrod.
 * Entry 0 = the netdev's unicast MAC (bytes swab16'd pairwise for the
 * CAM layout), entry 1 = the broadcast address.  The table is then
 * posted to the firmware with a SET_MAC ramrod; completion is
 * asynchronous. */
6060 static void bnx2x_set_mac_addr(struct bnx2x *bp)
6062 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6065 * unicasts 0-31:port0 32-63:port1
6066 * multicast 64-127:port0 128-191:port1
6068 config->hdr.length_6b = 2;
6069 config->hdr.offset = bp->port ? 31 : 0;
6070 config->hdr.reserved0 = 0;
6071 config->hdr.reserved1 = 0;
/* CAM stores the MAC as three 16-bit halves, byte-swapped. */
6074 config->config_table[0].cam_entry.msb_mac_addr =
6075 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6076 config->config_table[0].cam_entry.middle_mac_addr =
6077 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6078 config->config_table[0].cam_entry.lsb_mac_addr =
6079 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6080 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6081 config->config_table[0].target_table_entry.flags = 0;
6082 config->config_table[0].target_table_entry.client_id = 0;
6083 config->config_table[0].target_table_entry.vlan_id = 0;
6085 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6086 config->config_table[0].cam_entry.msb_mac_addr,
6087 config->config_table[0].cam_entry.middle_mac_addr,
6088 config->config_table[0].cam_entry.lsb_mac_addr);
/* Entry 1: all-ones broadcast address, flagged as broadcast target. */
6091 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6092 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6093 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6094 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6095 config->config_table[1].target_table_entry.flags =
6096 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6097 config->config_table[1].target_table_entry.client_id = 0;
6098 config->config_table[1].target_table_entry.vlan_id = 0;
6100 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6101 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6102 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
/* Wait for a slow-path ramrod to complete: spin until *state_p (set by
 * bnx2x_sp_event()) reaches @state.  With @poll set, actively poll the
 * rx completion ring (queue 0 plus queue @idx, since non-default-queue
 * commands complete there).  Returns non-zero on timeout — loop bounds
 * and return values are elided in this view. */
6105 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6106 int *state_p, int poll)
6108 /* can take a while if any port is running */
6111 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6112 poll ? "polling" : "waiting", state, idx);
6119 bnx2x_rx_int(bp->fp, 10);
6120 /* If index is different from 0
6121 * The reply for some commands will
6122 * be on the none default queue
6125 bnx2x_rx_int(&bp->fp[idx], 10);
6128 mb(); /* state is changed by bnx2x_sp_event()*/
6130 if (*state_p != state)
6139 BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx);
/* Open the leading (default) connection: re-arm the default status
 * block in the IGU, post the PORT_SETUP ramrod, and wait for bp->state
 * to become OPEN (interrupt-driven, poll = 0). */
6144 static int bnx2x_setup_leading(struct bnx2x *bp)
6147 /* reset IGU state */
6148 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6151 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6153 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
/* Open a non-default client/queue @index: re-arm its status block,
 * mark it OPENING, post the CLIENT_SETUP ramrod, and poll (poll = 1)
 * for its state to become FP_STATE_OPEN. */
6157 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6160 /* reset IGU state */
6161 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6163 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6164 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6166 /* Wait for completion */
6167 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6168 &(bp->fp[index].state), 1);
6173 static int bnx2x_poll(struct napi_struct *napi, int budget);
6174 static void bnx2x_set_rx_mode(struct net_device *dev);
6176 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
6181 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6183 /* Send LOAD_REQUEST command to MCP.
6184 Returns the type of LOAD command: if it is the
6185 first port to be initialized common blocks should be
6186 initialized, otherwise - not.
6189 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6190 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6191 return -EBUSY; /* other port in diagnostic mode */
6194 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
6197 /* if we can't use msix we only need one fp,
6198 * so try to enable msix with the requested number of fp's
6199 * and fallback to inta with one fp
6205 if ((use_multi > 1) && (use_multi <= 16))
6206 /* user requested number */
6207 bp->num_queues = use_multi;
6208 else if (use_multi == 1)
6209 bp->num_queues = num_online_cpus();
6213 if (bnx2x_enable_msix(bp)) {
6214 /* failed to enable msix */
6217 BNX2X_ERR("Multi requested but failed"
6218 " to enable MSI-X\n");
6223 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
6225 if (bnx2x_alloc_mem(bp))
6229 if (bp->flags & USING_MSIX_FLAG) {
6230 if (bnx2x_req_msix_irqs(bp)) {
6231 pci_disable_msix(bp->pdev);
6236 if (bnx2x_req_irq(bp)) {
6237 BNX2X_ERR("IRQ request failed, aborting\n");
6243 for_each_queue(bp, i)
6244 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6249 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
6250 BNX2X_ERR("HW init failed, aborting\n");
6255 atomic_set(&bp->intr_sem, 0);
6258 /* Setup NIC internals and enable interrupts */
6261 /* Send LOAD_DONE command to MCP */
6263 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6264 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
6266 BNX2X_ERR("MCP response failure, unloading\n");
6271 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6273 /* Enable Rx interrupt handling before sending the ramrod
6274 as it's completed on Rx FP queue */
6275 for_each_queue(bp, i)
6276 napi_enable(&bnx2x_fp(bp, i, napi));
6278 if (bnx2x_setup_leading(bp))
6281 for_each_nondefault_queue(bp, i)
6282 if (bnx2x_setup_multi(bp, i))
6285 bnx2x_set_mac_addr(bp);
6289 /* Start fast path */
6290 if (req_irq) { /* IRQ is only requested from bnx2x_open */
6291 netif_start_queue(bp->dev);
6292 if (bp->flags & USING_MSIX_FLAG)
6293 printk(KERN_INFO PFX "%s: using MSI-X\n",
6296 /* Otherwise Tx queue should be only reenabled */
6297 } else if (netif_running(bp->dev)) {
6298 netif_wake_queue(bp->dev);
6299 bnx2x_set_rx_mode(bp->dev);
6302 /* start the timer */
6303 mod_timer(&bp->timer, jiffies + bp->current_interval);
6308 for_each_queue(bp, i)
6309 napi_disable(&bnx2x_fp(bp, i, napi));
6312 bnx2x_disable_int_sync(bp);
6314 bnx2x_free_skbs(bp);
6320 /* TBD we really need to reset the chip
6321 if we want to recover from this */
6325 static void bnx2x_netif_stop(struct bnx2x *bp)
6329 bp->rx_mode = BNX2X_RX_MODE_NONE;
6330 bnx2x_set_storm_rx_mode(bp);
6332 bnx2x_disable_int_sync(bp);
6333 bnx2x_link_reset(bp);
6335 for_each_queue(bp, i)
6336 napi_disable(&bnx2x_fp(bp, i, napi));
6338 if (netif_running(bp->dev)) {
6339 netif_tx_disable(bp->dev);
6340 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6344 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6346 int port = bp->port;
6352 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
6354 /* Do not rcv packets to BRB */
6355 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6356 /* Do not direct rcv packets that are not for MCP to the BRB */
6357 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6358 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6360 /* Configure IGU and AEU */
6361 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6362 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6364 /* TODO: Close Doorbell port? */
6371 base = port * RQ_ONCHIP_AT_PORT_SIZE;
6372 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
6374 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6376 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
6377 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
6381 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6383 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6385 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6390 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6395 /* halt the connection */
6396 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
6400 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6401 &(bp->fp[index].state), 1);
6402 if (rc) /* timeout */
6405 /* delete cfc entry */
6406 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6408 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index,
6409 &(bp->fp[index].state), 1);
6414 static void bnx2x_stop_leading(struct bnx2x *bp)
6417 /* if the other port is handling traffic,
6418 this can take a lot of time */
6423 /* Send HALT ramrod */
6424 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6425 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
6427 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6428 &(bp->fp[0].state), 1))
6431 bp->dsb_sp_prod_idx = *bp->dsb_sp_prod;
6433 /* Send CFC_DELETE ramrod */
6434 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6437 Wait for completion.
6438 we are going to reset the chip anyway
6439 so there is not much to do if this times out
6441 while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) {
6448 static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
6454 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6456 /* Calling flush_scheduled_work() may deadlock because
6457 * linkwatch_event() may be on the workqueue and it will try to get
6458 * the rtnl_lock which we are holding.
6461 while (bp->in_reset_task)
6464 /* Delete the timer: do it before disabling interrupts, as it
6465 may be still STAT_QUERY ramrod pending after stopping the timer */
6466 del_timer_sync(&bp->timer);
6468 /* Wait until stat ramrod returns and all SP tasks complete */
6469 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
6472 /* Stop fast path, disable MAC, disable interrupts, disable napi */
6473 bnx2x_netif_stop(bp);
6475 if (bp->flags & NO_WOL_FLAG)
6476 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6478 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
6479 u8 *mac_addr = bp->dev->dev_addr;
6480 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
6481 EMAC_MODE_ACPI_RCVD);
6483 EMAC_WR(EMAC_REG_EMAC_MODE, val);
6485 val = (mac_addr[0] << 8) | mac_addr[1];
6486 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
6488 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6489 (mac_addr[4] << 8) | mac_addr[5];
6490 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
6492 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6494 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6496 for_each_nondefault_queue(bp, i)
6497 if (bnx2x_stop_multi(bp, i))
6501 bnx2x_stop_leading(bp);
6505 rc = bnx2x_fw_command(bp, reset_code);
6507 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6513 /* Reset the chip */
6514 bnx2x_reset_chip(bp, rc);
6516 /* Report UNLOAD_DONE to MCP */
6518 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6520 /* Free SKBs and driver internals */
6521 bnx2x_free_skbs(bp);
6524 bp->state = BNX2X_STATE_CLOSED;
6527 netif_carrier_off(bp->dev);
6532 /* end of nic load/unload */
6537 * Init service functions
6540 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
6542 int port = bp->port;
6547 switch (switch_cfg) {
6549 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6551 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
6552 switch (ext_phy_type) {
6553 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6554 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6557 bp->supported |= (SUPPORTED_10baseT_Half |
6558 SUPPORTED_10baseT_Full |
6559 SUPPORTED_100baseT_Half |
6560 SUPPORTED_100baseT_Full |
6561 SUPPORTED_1000baseT_Full |
6562 SUPPORTED_2500baseT_Full |
6563 SUPPORTED_TP | SUPPORTED_FIBRE |
6566 SUPPORTED_Asym_Pause);
6569 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6570 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6573 bp->phy_flags |= PHY_SGMII_FLAG;
6575 bp->supported |= (/* SUPPORTED_10baseT_Half |
6576 SUPPORTED_10baseT_Full |
6577 SUPPORTED_100baseT_Half |
6578 SUPPORTED_100baseT_Full |*/
6579 SUPPORTED_1000baseT_Full |
6580 SUPPORTED_TP | SUPPORTED_FIBRE |
6583 SUPPORTED_Asym_Pause);
6587 BNX2X_ERR("NVRAM config error. "
6588 "BAD SerDes ext_phy_config 0x%x\n",
6589 bp->ext_phy_config);
6593 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6595 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
6598 case SWITCH_CFG_10G:
6599 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6601 bp->phy_flags |= PHY_XGXS_FLAG;
6603 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6604 switch (ext_phy_type) {
6605 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6606 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6609 bp->supported |= (SUPPORTED_10baseT_Half |
6610 SUPPORTED_10baseT_Full |
6611 SUPPORTED_100baseT_Half |
6612 SUPPORTED_100baseT_Full |
6613 SUPPORTED_1000baseT_Full |
6614 SUPPORTED_2500baseT_Full |
6615 SUPPORTED_10000baseT_Full |
6616 SUPPORTED_TP | SUPPORTED_FIBRE |
6619 SUPPORTED_Asym_Pause);
6622 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6623 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6624 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705/6)\n",
6627 bp->supported |= (SUPPORTED_10000baseT_Full |
6630 SUPPORTED_Asym_Pause);
6634 BNX2X_ERR("NVRAM config error. "
6635 "BAD XGXS ext_phy_config 0x%x\n",
6636 bp->ext_phy_config);
6640 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6642 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
6644 bp->ser_lane = ((bp->lane_config &
6645 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
6646 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
6647 bp->rx_lane_swap = ((bp->lane_config &
6648 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
6649 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
6650 bp->tx_lane_swap = ((bp->lane_config &
6651 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
6652 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
6653 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
6654 bp->rx_lane_swap, bp->tx_lane_swap);
6658 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6663 /* mask what we support according to speed_cap_mask */
6664 if (!(bp->speed_cap_mask &
6665 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6666 bp->supported &= ~SUPPORTED_10baseT_Half;
6668 if (!(bp->speed_cap_mask &
6669 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6670 bp->supported &= ~SUPPORTED_10baseT_Full;
6672 if (!(bp->speed_cap_mask &
6673 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6674 bp->supported &= ~SUPPORTED_100baseT_Half;
6676 if (!(bp->speed_cap_mask &
6677 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6678 bp->supported &= ~SUPPORTED_100baseT_Full;
6680 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6681 bp->supported &= ~(SUPPORTED_1000baseT_Half |
6682 SUPPORTED_1000baseT_Full);
6684 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6685 bp->supported &= ~SUPPORTED_2500baseT_Full;
6687 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6688 bp->supported &= ~SUPPORTED_10000baseT_Full;
6690 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
6693 static void bnx2x_link_settings_requested(struct bnx2x *bp)
6695 bp->req_autoneg = 0;
6696 bp->req_duplex = DUPLEX_FULL;
6698 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6699 case PORT_FEATURE_LINK_SPEED_AUTO:
6700 if (bp->supported & SUPPORTED_Autoneg) {
6701 bp->req_autoneg |= AUTONEG_SPEED;
6702 bp->req_line_speed = 0;
6703 bp->advertising = bp->supported;
6707 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
6708 if ((ext_phy_type ==
6709 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6711 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
6712 /* force 10G, no AN */
6713 bp->req_line_speed = SPEED_10000;
6715 (ADVERTISED_10000baseT_Full |
6719 BNX2X_ERR("NVRAM config error. "
6720 "Invalid link_config 0x%x"
6721 " Autoneg not supported\n",
6727 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6728 if (bp->speed_cap_mask &
6729 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
6730 bp->req_line_speed = SPEED_10;
6731 bp->advertising = (ADVERTISED_10baseT_Full |
6734 BNX2X_ERR("NVRAM config error. "
6735 "Invalid link_config 0x%x"
6736 " speed_cap_mask 0x%x\n",
6737 bp->link_config, bp->speed_cap_mask);
6742 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6743 if (bp->speed_cap_mask &
6744 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
6745 bp->req_line_speed = SPEED_10;
6746 bp->req_duplex = DUPLEX_HALF;
6747 bp->advertising = (ADVERTISED_10baseT_Half |
6750 BNX2X_ERR("NVRAM config error. "
6751 "Invalid link_config 0x%x"
6752 " speed_cap_mask 0x%x\n",
6753 bp->link_config, bp->speed_cap_mask);
6758 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6759 if (bp->speed_cap_mask &
6760 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
6761 bp->req_line_speed = SPEED_100;
6762 bp->advertising = (ADVERTISED_100baseT_Full |
6765 BNX2X_ERR("NVRAM config error. "
6766 "Invalid link_config 0x%x"
6767 " speed_cap_mask 0x%x\n",
6768 bp->link_config, bp->speed_cap_mask);
6773 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6774 if (bp->speed_cap_mask &
6775 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
6776 bp->req_line_speed = SPEED_100;
6777 bp->req_duplex = DUPLEX_HALF;
6778 bp->advertising = (ADVERTISED_100baseT_Half |
6781 BNX2X_ERR("NVRAM config error. "
6782 "Invalid link_config 0x%x"
6783 " speed_cap_mask 0x%x\n",
6784 bp->link_config, bp->speed_cap_mask);
6789 case PORT_FEATURE_LINK_SPEED_1G:
6790 if (bp->speed_cap_mask &
6791 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) {
6792 bp->req_line_speed = SPEED_1000;
6793 bp->advertising = (ADVERTISED_1000baseT_Full |
6796 BNX2X_ERR("NVRAM config error. "
6797 "Invalid link_config 0x%x"
6798 " speed_cap_mask 0x%x\n",
6799 bp->link_config, bp->speed_cap_mask);
6804 case PORT_FEATURE_LINK_SPEED_2_5G:
6805 if (bp->speed_cap_mask &
6806 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) {
6807 bp->req_line_speed = SPEED_2500;
6808 bp->advertising = (ADVERTISED_2500baseT_Full |
6811 BNX2X_ERR("NVRAM config error. "
6812 "Invalid link_config 0x%x"
6813 " speed_cap_mask 0x%x\n",
6814 bp->link_config, bp->speed_cap_mask);
6819 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6820 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6821 case PORT_FEATURE_LINK_SPEED_10G_KR:
6822 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
6823 BNX2X_ERR("NVRAM config error. "
6824 "Invalid link_config 0x%x"
6825 " phy_flags 0x%x\n",
6826 bp->link_config, bp->phy_flags);
6829 if (bp->speed_cap_mask &
6830 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
6831 bp->req_line_speed = SPEED_10000;
6832 bp->advertising = (ADVERTISED_10000baseT_Full |
6835 BNX2X_ERR("NVRAM config error. "
6836 "Invalid link_config 0x%x"
6837 " speed_cap_mask 0x%x\n",
6838 bp->link_config, bp->speed_cap_mask);
6844 BNX2X_ERR("NVRAM config error. "
6845 "BAD link speed link_config 0x%x\n",
6847 bp->req_autoneg |= AUTONEG_SPEED;
6848 bp->req_line_speed = 0;
6849 bp->advertising = bp->supported;
6852 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
6853 bp->req_line_speed, bp->req_duplex);
6855 bp->req_flow_ctrl = (bp->link_config &
6856 PORT_FEATURE_FLOW_CONTROL_MASK);
6857 /* Please refer to Table 28B-3 of the 802.3ab-1999 spec */
6858 switch (bp->req_flow_ctrl) {
6859 case FLOW_CTRL_AUTO:
6860 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
6861 if (bp->dev->mtu <= 4500) {
6862 bp->pause_mode = PAUSE_BOTH;
6863 bp->advertising |= (ADVERTISED_Pause |
6864 ADVERTISED_Asym_Pause);
6866 bp->pause_mode = PAUSE_ASYMMETRIC;
6867 bp->advertising |= ADVERTISED_Asym_Pause;
6872 bp->pause_mode = PAUSE_ASYMMETRIC;
6873 bp->advertising |= ADVERTISED_Asym_Pause;
6877 case FLOW_CTRL_BOTH:
6878 bp->pause_mode = PAUSE_BOTH;
6879 bp->advertising |= (ADVERTISED_Pause |
6880 ADVERTISED_Asym_Pause);
6883 case FLOW_CTRL_NONE:
6885 bp->pause_mode = PAUSE_NONE;
6886 bp->advertising &= ~(ADVERTISED_Pause |
6887 ADVERTISED_Asym_Pause);
6890 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x\n"
6891 KERN_INFO " pause_mode %d advertising 0x%x\n",
6892 bp->req_autoneg, bp->req_flow_ctrl,
6893 bp->pause_mode, bp->advertising);
6896 static void bnx2x_get_hwinfo(struct bnx2x *bp)
6898 u32 val, val2, val3, val4, id;
6899 int port = bp->port;
6902 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6903 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
6905 /* Get the chip revision id and number. */
6906 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6907 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6908 id = ((val & 0xffff) << 16);
6909 val = REG_RD(bp, MISC_REG_CHIP_REV);
6910 id |= ((val & 0xf) << 12);
6911 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6912 id |= ((val & 0xff) << 4);
6913 REG_RD(bp, MISC_REG_BOND_ID);
6916 BNX2X_DEV_INFO("chip ID is %x\n", id);
6918 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
6919 BNX2X_DEV_INFO("MCP not active\n");
6924 val = SHMEM_RD(bp, validity_map[port]);
6925 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6926 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6927 BNX2X_ERR("MCP validity signature bad\n");
6929 bp->fw_seq = (SHMEM_RD(bp, drv_fw_mb[port].drv_mb_header) &
6930 DRV_MSG_SEQ_NUMBER_MASK);
6932 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6935 SHMEM_RD(bp, dev_info.port_hw_config[bp->port].serdes_config);
6937 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6938 bp->ext_phy_config =
6940 dev_info.port_hw_config[port].external_phy_config);
6941 bp->speed_cap_mask =
6943 dev_info.port_hw_config[port].speed_capability_mask);
6946 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6948 BNX2X_DEV_INFO("hw_config (%08x) serdes_config (%08x)\n"
6949 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
6950 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
6952 bp->hw_config, bp->serdes_config, bp->lane_config,
6953 bp->ext_phy_config, bp->speed_cap_mask,
6954 bp->link_config, bp->fw_seq);
6956 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
6957 bnx2x_link_settings_supported(bp, switch_cfg);
6959 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
6960 /* for now disable cl73 */
6961 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
6962 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
6964 bnx2x_link_settings_requested(bp);
6966 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6967 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6968 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6969 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6970 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6971 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6972 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6973 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6975 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
6978 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6979 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6980 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6981 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6983 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6984 val, val2, val3, val4);
6988 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
6989 BNX2X_DEV_INFO("bc_ver %X\n", val);
6990 if (val < BNX2X_BC_VER) {
6991 /* for now only warn
6992 * later we might need to enforce this */
6993 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6994 " please upgrade BC\n", BNX2X_BC_VER, val);
7000 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7001 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7002 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7003 bp->flash_size, bp->flash_size);
7007 set_mac: /* only supposed to happen on emulation/FPGA */
7008 BNX2X_ERR("warning constant MAC workaround active\n");
7009 bp->dev->dev_addr[0] = 0;
7010 bp->dev->dev_addr[1] = 0x50;
7011 bp->dev->dev_addr[2] = 0xc2;
7012 bp->dev->dev_addr[3] = 0x2c;
7013 bp->dev->dev_addr[4] = 0x71;
7014 bp->dev->dev_addr[5] = port ? 0x0d : 0x0e;
7016 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7021 * ethtool service functions
7024 /* All ethtool functions called with rtnl_lock */
7026 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7028 struct bnx2x *bp = netdev_priv(dev);
7030 cmd->supported = bp->supported;
7031 cmd->advertising = bp->advertising;
7033 if (netif_carrier_ok(dev)) {
7034 cmd->speed = bp->line_speed;
7035 cmd->duplex = bp->duplex;
7037 cmd->speed = bp->req_line_speed;
7038 cmd->duplex = bp->req_duplex;
7041 if (bp->phy_flags & PHY_XGXS_FLAG) {
7042 cmd->port = PORT_FIBRE;
7044 cmd->port = PORT_TP;
7047 cmd->phy_address = bp->phy_addr;
7048 cmd->transceiver = XCVR_INTERNAL;
7050 if (bp->req_autoneg & AUTONEG_SPEED) {
7051 cmd->autoneg = AUTONEG_ENABLE;
7053 cmd->autoneg = AUTONEG_DISABLE;
7059 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7060 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7061 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7062 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7063 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7064 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7065 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7070 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7072 struct bnx2x *bp = netdev_priv(dev);
7075 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7076 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7077 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7078 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7079 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7080 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7081 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7083 switch (cmd->port) {
7085 if (!(bp->supported & SUPPORTED_TP))
7088 if (bp->phy_flags & PHY_XGXS_FLAG) {
7089 bnx2x_link_reset(bp);
7090 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7091 bnx2x_phy_deassert(bp);
7096 if (!(bp->supported & SUPPORTED_FIBRE))
7099 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7100 bnx2x_link_reset(bp);
7101 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7102 bnx2x_phy_deassert(bp);
7110 if (cmd->autoneg == AUTONEG_ENABLE) {
7111 if (!(bp->supported & SUPPORTED_Autoneg))
7114 /* advertise the requested speed and duplex if supported */
7115 cmd->advertising &= bp->supported;
7117 bp->req_autoneg |= AUTONEG_SPEED;
7118 bp->req_line_speed = 0;
7119 bp->req_duplex = DUPLEX_FULL;
7120 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7122 } else { /* forced speed */
7123 /* advertise the requested speed and duplex if supported */
7124 switch (cmd->speed) {
7126 if (cmd->duplex == DUPLEX_FULL) {
7127 if (!(bp->supported & SUPPORTED_10baseT_Full))
7130 advertising = (ADVERTISED_10baseT_Full |
7133 if (!(bp->supported & SUPPORTED_10baseT_Half))
7136 advertising = (ADVERTISED_10baseT_Half |
7142 if (cmd->duplex == DUPLEX_FULL) {
7143 if (!(bp->supported &
7144 SUPPORTED_100baseT_Full))
7147 advertising = (ADVERTISED_100baseT_Full |
7150 if (!(bp->supported &
7151 SUPPORTED_100baseT_Half))
7154 advertising = (ADVERTISED_100baseT_Half |
7160 if (cmd->duplex != DUPLEX_FULL)
7163 if (!(bp->supported & SUPPORTED_1000baseT_Full))
7166 advertising = (ADVERTISED_1000baseT_Full |
7171 if (cmd->duplex != DUPLEX_FULL)
7174 if (!(bp->supported & SUPPORTED_2500baseT_Full))
7177 advertising = (ADVERTISED_2500baseT_Full |
7182 if (cmd->duplex != DUPLEX_FULL)
7185 if (!(bp->supported & SUPPORTED_10000baseT_Full))
7188 advertising = (ADVERTISED_10000baseT_Full |
7196 bp->req_autoneg &= ~AUTONEG_SPEED;
7197 bp->req_line_speed = cmd->speed;
7198 bp->req_duplex = cmd->duplex;
7199 bp->advertising = advertising;
7202 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
7203 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7204 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
7207 bnx2x_stop_stats(bp);
7208 bnx2x_link_initialize(bp);
7213 static void bnx2x_get_drvinfo(struct net_device *dev,
7214 struct ethtool_drvinfo *info)
7216 struct bnx2x *bp = netdev_priv(dev);
7218 strcpy(info->driver, DRV_MODULE_NAME);
7219 strcpy(info->version, DRV_MODULE_VERSION);
7220 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
7221 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
7222 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
7224 strcpy(info->bus_info, pci_name(bp->pdev));
7225 info->n_stats = BNX2X_NUM_STATS;
7226 info->testinfo_len = BNX2X_NUM_TESTS;
7227 info->eedump_len = bp->flash_size;
7228 info->regdump_len = 0;
7231 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7233 struct bnx2x *bp = netdev_priv(dev);
7235 if (bp->flags & NO_WOL_FLAG) {
7239 wol->supported = WAKE_MAGIC;
7241 wol->wolopts = WAKE_MAGIC;
7245 memset(&wol->sopass, 0, sizeof(wol->sopass));
7248 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7250 struct bnx2x *bp = netdev_priv(dev);
7252 if (wol->wolopts & ~WAKE_MAGIC)
7255 if (wol->wolopts & WAKE_MAGIC) {
7256 if (bp->flags & NO_WOL_FLAG)
7266 static u32 bnx2x_get_msglevel(struct net_device *dev)
7268 struct bnx2x *bp = netdev_priv(dev);
7270 return bp->msglevel;
7273 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7275 struct bnx2x *bp = netdev_priv(dev);
7277 if (capable(CAP_NET_ADMIN))
7278 bp->msglevel = level;
7281 static int bnx2x_nway_reset(struct net_device *dev)
7283 struct bnx2x *bp = netdev_priv(dev);
7285 if (bp->state != BNX2X_STATE_OPEN) {
7286 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7290 bnx2x_stop_stats(bp);
7291 bnx2x_link_initialize(bp);
7296 static int bnx2x_get_eeprom_len(struct net_device *dev)
7298 struct bnx2x *bp = netdev_priv(dev);
7300 return bp->flash_size;
7303 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7305 int port = bp->port;
7309 /* adjust timeout for emulation/FPGA */
7310 count = NVRAM_TIMEOUT_COUNT;
7311 if (CHIP_REV_IS_SLOW(bp))
7314 /* request access to nvram interface */
7315 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7316 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7318 for (i = 0; i < count*10; i++) {
7319 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7320 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7326 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7327 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
7334 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7336 int port = bp->port;
7340 /* adjust timeout for emulation/FPGA */
7341 count = NVRAM_TIMEOUT_COUNT;
7342 if (CHIP_REV_IS_SLOW(bp))
7345 /* relinquish nvram interface */
7346 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7347 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7349 for (i = 0; i < count*10; i++) {
7350 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7351 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7357 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7358 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
7365 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7369 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7371 /* enable both bits, even on read */
7372 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7373 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7374 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7377 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7381 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7383 /* disable both bits, even after read */
7384 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7385 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7386 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7389 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7396 /* build the command word */
7397 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7399 /* need to clear DONE bit separately */
7400 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7402 /* address of the NVRAM to read from */
7403 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7404 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7406 /* issue a read command */
7407 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7409 /* adjust timeout for emulation/FPGA */
7410 count = NVRAM_TIMEOUT_COUNT;
7411 if (CHIP_REV_IS_SLOW(bp))
7414 /* wait for completion */
7417 for (i = 0; i < count; i++) {
7419 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7421 if (val & MCPR_NVM_COMMAND_DONE) {
7422 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7423 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7424 /* we read nvram data in cpu order
7425 * but ethtool sees it as an array of bytes
7426 * converting to big-endian will do the work */
7427 val = cpu_to_be32(val);
7437 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7444 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7446 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7451 if (offset + buf_size > bp->flash_size) {
7452 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7453 " buf_size (0x%x) > flash_size (0x%x)\n",
7454 offset, buf_size, bp->flash_size);
7458 /* request access to nvram interface */
7459 rc = bnx2x_acquire_nvram_lock(bp);
7463 /* enable access to nvram interface */
7464 bnx2x_enable_nvram_access(bp);
7466 /* read the first word(s) */
7467 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7468 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7469 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7470 memcpy(ret_buf, &val, 4);
7472 /* advance to the next dword */
7473 offset += sizeof(u32);
7474 ret_buf += sizeof(u32);
7475 buf_size -= sizeof(u32);
7480 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7481 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7482 memcpy(ret_buf, &val, 4);
7485 /* disable access to nvram interface */
7486 bnx2x_disable_nvram_access(bp);
7487 bnx2x_release_nvram_lock(bp);
7492 static int bnx2x_get_eeprom(struct net_device *dev,
7493 struct ethtool_eeprom *eeprom, u8 *eebuf)
7495 struct bnx2x *bp = netdev_priv(dev);
7498 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7499 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7500 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7501 eeprom->len, eeprom->len);
7503 /* parameters already validated in ethtool_get_eeprom */
7505 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7510 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7516 /* build the command word */
7517 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7519 /* need to clear DONE bit separately */
7520 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7522 /* write the data */
7523 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7525 /* address of the NVRAM to write to */
7526 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7527 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7529 /* issue the write command */
7530 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7532 /* adjust timeout for emulation/FPGA */
7533 count = NVRAM_TIMEOUT_COUNT;
7534 if (CHIP_REV_IS_SLOW(bp))
7537 /* wait for completion */
7539 for (i = 0; i < count; i++) {
7541 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7542 if (val & MCPR_NVM_COMMAND_DONE) {
7551 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
7553 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7561 if (offset + buf_size > bp->flash_size) {
7562 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7563 " buf_size (0x%x) > flash_size (0x%x)\n",
7564 offset, buf_size, bp->flash_size);
7568 /* request access to nvram interface */
7569 rc = bnx2x_acquire_nvram_lock(bp);
7573 /* enable access to nvram interface */
7574 bnx2x_enable_nvram_access(bp);
7576 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
7577 align_offset = (offset & ~0x03);
7578 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
7581 val &= ~(0xff << BYTE_OFFSET(offset));
7582 val |= (*data_buf << BYTE_OFFSET(offset));
7584 /* nvram data is returned as an array of bytes
7585 * convert it back to cpu order */
7586 val = be32_to_cpu(val);
7588 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7590 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
7594 /* disable access to nvram interface */
7595 bnx2x_disable_nvram_access(bp);
7596 bnx2x_release_nvram_lock(bp);
/* Write buf_size bytes from data_buf to NVRAM at offset, one dword per
 * hardware command.  A 1-byte request (as issued by ethtool) is delegated
 * to bnx2x_nvram_write1(); otherwise offset and size must be dword-aligned.
 * NOTE(review): chunk is missing lines (declarations, early returns,
 * closing braces) — verify against the full source.
 */
7601 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7609 if (buf_size == 1) { /* ethtool */
7610 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
/* bulk path requires dword alignment and a non-empty buffer */
7613 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
7615 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7620 if (offset + buf_size > bp->flash_size) {
7621 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
7622 " buf_size (0x%x) > flash_size (0x%x)\n",
7623 offset, buf_size, bp->flash_size);
7627 /* request access to nvram interface */
7628 rc = bnx2x_acquire_nvram_lock(bp);
7632 /* enable access to nvram interface */
7633 bnx2x_enable_nvram_access(bp);
7636 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7637 while ((written_so_far < buf_size) && (rc == 0)) {
/* LAST on the final dword of the buffer or at a page boundary;
 * FIRST again when starting a new NVRAM page */
7638 if (written_so_far == (buf_size - sizeof(u32)))
7639 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7640 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
7641 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7642 else if ((offset % NVRAM_PAGE_SIZE) == 0)
7643 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
7645 memcpy(&val, data_buf, 4);
7646 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
7648 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
7650 /* advance to the next dword */
7651 offset += sizeof(u32);
7652 data_buf += sizeof(u32);
7653 written_so_far += sizeof(u32);
7657 /* disable access to nvram interface */
7658 bnx2x_disable_nvram_access(bp);
7659 bnx2x_release_nvram_lock(bp);
/* ethtool .set_eeprom handler: forwards the (already validated) request
 * straight to bnx2x_nvram_write().
 */
7664 static int bnx2x_set_eeprom(struct net_device *dev,
7665 struct ethtool_eeprom *eeprom, u8 *eebuf)
7667 struct bnx2x *bp = netdev_priv(dev);
7670 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7671 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7672 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7673 eeprom->len, eeprom->len);
7675 /* parameters already validated in ethtool_set_eeprom */
7677 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7682 static int bnx2x_get_coalesce(struct net_device *dev,
7683 struct ethtool_coalesce *coal)
7685 struct bnx2x *bp = netdev_priv(dev);
7687 memset(coal, 0, sizeof(struct ethtool_coalesce));
7689 coal->rx_coalesce_usecs = bp->rx_ticks;
7690 coal->tx_coalesce_usecs = bp->tx_ticks;
7691 coal->stats_block_coalesce_usecs = bp->stats_ticks;
/* ethtool .set_coalesce handler: clamp and store the requested interrupt
 * coalescing intervals, then push them to the hardware if the interface
 * is up.
 */
7696 static int bnx2x_set_coalesce(struct net_device *dev,
7697 struct ethtool_coalesce *coal)
7699 struct bnx2x *bp = netdev_priv(dev);
/* NOTE(review): rx is clamped to decimal 3000 but tx to hex 0x3000
 * (= 12288) — the mismatch looks like a typo; confirm intended limits */
7701 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7702 if (bp->rx_ticks > 3000)
7703 bp->rx_ticks = 3000;
7705 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7706 if (bp->tx_ticks > 0x3000)
7707 bp->tx_ticks = 0x3000;
/* stats interval is clamped and then rounded down to a 0x100 multiple */
7709 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7710 if (bp->stats_ticks > 0xffff00)
7711 bp->stats_ticks = 0xffff00;
7712 bp->stats_ticks &= 0xffff00;
7714 if (netif_running(bp->dev))
7715 bnx2x_update_coalesce(bp);
7720 static void bnx2x_get_ringparam(struct net_device *dev,
7721 struct ethtool_ringparam *ering)
7723 struct bnx2x *bp = netdev_priv(dev);
7725 ering->rx_max_pending = MAX_RX_AVAIL;
7726 ering->rx_mini_max_pending = 0;
7727 ering->rx_jumbo_max_pending = 0;
7729 ering->rx_pending = bp->rx_ring_size;
7730 ering->rx_mini_pending = 0;
7731 ering->rx_jumbo_pending = 0;
7733 ering->tx_max_pending = MAX_TX_AVAIL;
7734 ering->tx_pending = bp->tx_ring_size;
/* ethtool .set_ringparam handler: validate the requested ring sizes
 * against driver limits, store them, and reload the NIC so they take
 * effect if the interface is running.  TX must leave room for a
 * maximally fragmented skb plus BD overhead.
 */
7737 static int bnx2x_set_ringparam(struct net_device *dev,
7738 struct ethtool_ringparam *ering)
7740 struct bnx2x *bp = netdev_priv(dev);
7742 if ((ering->rx_pending > MAX_RX_AVAIL) ||
7743 (ering->tx_pending > MAX_TX_AVAIL) ||
7744 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
7747 bp->rx_ring_size = ering->rx_pending;
7748 bp->tx_ring_size = ering->tx_pending;
/* new sizes only take effect through a full unload/load cycle */
7750 if (netif_running(bp->dev)) {
7751 bnx2x_nic_unload(bp, 0);
7752 bnx2x_nic_load(bp, 0);
/* ethtool .get_pauseparam handler: report pause-frame autonegotiation
 * and the currently active RX/TX flow-control state.
 * NOTE(review): the assignment target for the first expression (line
 * 7764) is not visible in this chunk — presumably epause->autoneg.
 */
7758 static void bnx2x_get_pauseparam(struct net_device *dev,
7759 struct ethtool_pauseparam *epause)
7761 struct bnx2x *bp = netdev_priv(dev);
7764 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
7765 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
7766 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
7768 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7769 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7770 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
/* ethtool .set_pauseparam handler: translate the requested pause settings
 * into req_autoneg/req_flow_ctrl/pause_mode/advertising, then restart the
 * link so they take effect.
 * NOTE(review): chunk is missing lines (else branches, some case labels,
 * breaks, return) — verify the control flow against the full source.
 */
7773 static int bnx2x_set_pauseparam(struct net_device *dev,
7774 struct ethtool_pauseparam *epause)
7776 struct bnx2x *bp = netdev_priv(dev);
7778 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7779 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7780 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7782 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
7783 if (epause->autoneg) {
7784 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
/* symmetric pause is only advertised for MTUs up to 4500;
 * larger frames fall back to asymmetric-only */
7785 if (bp->dev->mtu <= 4500) {
7786 bp->pause_mode = PAUSE_BOTH;
7787 bp->advertising |= (ADVERTISED_Pause |
7788 ADVERTISED_Asym_Pause);
7790 bp->pause_mode = PAUSE_ASYMMETRIC;
7791 bp->advertising |= ADVERTISED_Asym_Pause;
7795 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
7797 if (epause->rx_pause)
7798 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7799 if (epause->tx_pause)
7800 bp->req_flow_ctrl |= FLOW_CTRL_TX;
/* map the forced flow-control bits onto pause_mode/advertising */
7802 switch (bp->req_flow_ctrl) {
7803 case FLOW_CTRL_AUTO:
/* neither rx nor tx was requested: force pause fully off */
7804 bp->req_flow_ctrl = FLOW_CTRL_NONE;
7805 bp->pause_mode = PAUSE_NONE;
7806 bp->advertising &= ~(ADVERTISED_Pause |
7807 ADVERTISED_Asym_Pause);
7811 bp->pause_mode = PAUSE_ASYMMETRIC;
7812 bp->advertising |= ADVERTISED_Asym_Pause;
7816 case FLOW_CTRL_BOTH:
7817 bp->pause_mode = PAUSE_BOTH;
7818 bp->advertising |= (ADVERTISED_Pause |
7819 ADVERTISED_Asym_Pause);
7824 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n"
7825 DP_LEVEL " pause_mode %d advertising 0x%x\n",
7826 bp->req_autoneg, bp->req_flow_ctrl, bp->pause_mode,
/* apply: stop statistics and re-initialize the link */
7829 bnx2x_stop_stats(bp);
7830 bnx2x_link_initialize(bp);
/* ethtool .get_rx_csum handler; body (return of the rx-csum flag) is not
 * visible in this chunk.
 */
7835 static u32 bnx2x_get_rx_csum(struct net_device *dev)
7837 struct bnx2x *bp = netdev_priv(dev);
/* ethtool .set_rx_csum handler; body (storing the flag) is not visible in
 * this chunk.
 */
7842 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
7844 struct bnx2x *bp = netdev_priv(dev);
/* ethtool .set_tso handler: toggle the TSO/TSO_ECN feature bits on the
 * net_device according to 'data' (the if/else lines are not visible here).
 */
7850 static int bnx2x_set_tso(struct net_device *dev, u32 data)
7853 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7855 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
/* Names reported for ETH_SS_TEST; order must match the result slots
 * filled by bnx2x_self_test().  (The struct header line is not visible
 * in this chunk.)
 */
7860 char string[ETH_GSTRING_LEN];
7861 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
7862 { "MC Errors (online)" }
7865 static int bnx2x_self_test_count(struct net_device *dev)
7867 return BNX2X_NUM_TESTS;
/* ethtool .self_test handler: runs the single online test (microcode
 * assert scan).  Statistics are paused around the test and restored
 * afterwards.  Results land in buf[]; failures set ETH_TEST_FL_FAILED.
 */
7870 static void bnx2x_self_test(struct net_device *dev,
7871 struct ethtool_test *etest, u64 *buf)
7873 struct bnx2x *bp = netdev_priv(dev);
7876 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
/* test requires the interface to be fully up */
7878 if (bp->state != BNX2X_STATE_OPEN) {
7879 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7883 stats_state = bp->stats_state;
7884 bnx2x_stop_stats(bp);
/* non-zero means the microcode reported asserts */
7886 if (bnx2x_mc_assert(bp) != 0) {
7888 etest->flags |= ETH_TEST_FL_FAILED;
7891 #ifdef BNX2X_EXTRA_DEBUG
7892 bnx2x_panic_dump(bp);
7894 bp->stats_state = stats_state;
/* Names reported for ETH_SS_STATS; index order must match
 * bnx2x_stats_offset_arr and bnx2x_stats_len_arr below.  (The struct
 * header line is not visible in this chunk.)
 */
7898 char string[ETH_GSTRING_LEN];
7899 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
7900 { "rx_bytes"}, /* 0 */
7901 { "rx_error_bytes"}, /* 1 */
7902 { "tx_bytes"}, /* 2 */
7903 { "tx_error_bytes"}, /* 3 */
7904 { "rx_ucast_packets"}, /* 4 */
7905 { "rx_mcast_packets"}, /* 5 */
7906 { "rx_bcast_packets"}, /* 6 */
7907 { "tx_ucast_packets"}, /* 7 */
7908 { "tx_mcast_packets"}, /* 8 */
7909 { "tx_bcast_packets"}, /* 9 */
7910 { "tx_mac_errors"}, /* 10 */
7911 { "tx_carrier_errors"}, /* 11 */
7912 { "rx_crc_errors"}, /* 12 */
7913 { "rx_align_errors"}, /* 13 */
7914 { "tx_single_collisions"}, /* 14 */
7915 { "tx_multi_collisions"}, /* 15 */
7916 { "tx_deferred"}, /* 16 */
7917 { "tx_excess_collisions"}, /* 17 */
7918 { "tx_late_collisions"}, /* 18 */
7919 { "tx_total_collisions"}, /* 19 */
7920 { "rx_fragments"}, /* 20 */
7921 { "rx_jabbers"}, /* 21 */
7922 { "rx_undersize_packets"}, /* 22 */
7923 { "rx_oversize_packets"}, /* 23 */
7924 { "rx_xon_frames"}, /* 24 */
7925 { "rx_xoff_frames"}, /* 25 */
7926 { "tx_xon_frames"}, /* 26 */
7927 { "tx_xoff_frames"}, /* 27 */
7928 { "rx_mac_ctrl_frames"}, /* 28 */
7929 { "rx_filtered_packets"}, /* 29 */
7930 { "rx_discards"}, /* 30 */
/* Byte offset of a bnx2x_eth_stats field expressed in 32-bit words,
 * for indexing the u32-viewed hardware stats block.
 */
7933 #define STATS_OFFSET32(offset_name) \
7934 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
/* 32-bit-word offsets into bnx2x_eth_stats for each ethtool counter;
 * index order matches bnx2x_stats_str_arr.  For 64-bit counters the
 * offset points at the _hi word (the _lo word follows it).
 */
7936 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
7937 STATS_OFFSET32(total_bytes_received_hi), /* 0 */
7938 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */
7939 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */
7940 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */
7941 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */
7942 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */
7943 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */
7944 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */
7945 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */
7946 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */
7947 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
7948 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */
7949 STATS_OFFSET32(crc_receive_errors), /* 12 */
7950 STATS_OFFSET32(alignment_errors), /* 13 */
7951 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */
7952 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */
7953 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */
7954 STATS_OFFSET32(excessive_collision_frames), /* 17 */
7955 STATS_OFFSET32(late_collision_frames), /* 18 */
7956 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */
7957 STATS_OFFSET32(runt_packets_received), /* 20 */
7958 STATS_OFFSET32(jabber_packets_received), /* 21 */
7959 STATS_OFFSET32(error_runt_packets_received), /* 22 */
7960 STATS_OFFSET32(error_jabber_packets_received), /* 23 */
7961 STATS_OFFSET32(pause_xon_frames_received), /* 24 */
7962 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */
7963 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */
7964 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */
7965 STATS_OFFSET32(control_frames_received), /* 28 */
7966 STATS_OFFSET32(mac_filter_discard), /* 29 */
7967 STATS_OFFSET32(no_buff_discard), /* 30 */
/* Per-counter width in bytes: 8 = 64-bit (hi/lo pair), 4 = 32-bit,
 * 0 = counter skipped by bnx2x_get_ethtool_stats().  Index order matches
 * bnx2x_stats_str_arr.  (The final row is not visible in this chunk.)
 */
7970 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
7971 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
7972 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
7973 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* ethtool .get_strings handler: copy the stats or self-test name table
 * into buf depending on the requested string set.  (The case labels —
 * presumably ETH_SS_STATS / ETH_SS_TEST — are not visible in this chunk.)
 */
7977 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7979 switch (stringset) {
7981 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
7985 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
7990 static int bnx2x_get_stats_count(struct net_device *dev)
7992 return BNX2X_NUM_STATS;
/* ethtool .get_ethtool_stats handler: copy each counter out of the
 * u32-viewed hardware stats block into buf[], widening 32-bit counters
 * and combining hi/lo words for 64-bit ones.  Zero-length entries are
 * skipped (their buf slot handling is not visible in this chunk).
 */
7995 static void bnx2x_get_ethtool_stats(struct net_device *dev,
7996 struct ethtool_stats *stats, u64 *buf)
7998 struct bnx2x *bp = netdev_priv(dev);
7999 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8002 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8003 if (bnx2x_stats_len_arr[i] == 0) {
8004 /* skip this counter */
8012 if (bnx2x_stats_len_arr[i] == 4) {
8013 /* 4-byte counter */
8014 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8017 /* 8-byte counter */
8018 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8019 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
/* ethtool .phys_id handler: blink the port LEDs for 'data' seconds
 * (500 ms on / 500 ms off per loop iteration) so the operator can locate
 * the port, then restore the LEDs to reflect the current link speed.
 * Aborts early if a signal arrives during the sleep.
 */
8023 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8025 struct bnx2x *bp = netdev_priv(dev);
8031 for (i = 0; i < (data * 2); i++) {
/* even iterations: LED on; odd iterations: LED off (branch lines
 * are not visible in this chunk) */
8033 bnx2x_leds_set(bp, SPEED_1000);
8035 bnx2x_leds_unset(bp);
8037 msleep_interruptible(500);
8038 if (signal_pending(current))
8043 bnx2x_leds_set(bp, bp->line_speed);
/* ethtool operations table wired into the net_device in bnx2x_init_one().
 * Generic ethtool_op_* helpers cover the feature bits that need no
 * driver-specific work; everything else points at the handlers above.
 */
8048 static struct ethtool_ops bnx2x_ethtool_ops = {
8049 .get_settings = bnx2x_get_settings,
8050 .set_settings = bnx2x_set_settings,
8051 .get_drvinfo = bnx2x_get_drvinfo,
8052 .get_wol = bnx2x_get_wol,
8053 .set_wol = bnx2x_set_wol,
8054 .get_msglevel = bnx2x_get_msglevel,
8055 .set_msglevel = bnx2x_set_msglevel,
8056 .nway_reset = bnx2x_nway_reset,
8057 .get_link = ethtool_op_get_link,
8058 .get_eeprom_len = bnx2x_get_eeprom_len,
8059 .get_eeprom = bnx2x_get_eeprom,
8060 .set_eeprom = bnx2x_set_eeprom,
8061 .get_coalesce = bnx2x_get_coalesce,
8062 .set_coalesce = bnx2x_set_coalesce,
8063 .get_ringparam = bnx2x_get_ringparam,
8064 .set_ringparam = bnx2x_set_ringparam,
8065 .get_pauseparam = bnx2x_get_pauseparam,
8066 .set_pauseparam = bnx2x_set_pauseparam,
8067 .get_rx_csum = bnx2x_get_rx_csum,
8068 .set_rx_csum = bnx2x_set_rx_csum,
8069 .get_tx_csum = ethtool_op_get_tx_csum,
8070 .set_tx_csum = ethtool_op_set_tx_csum,
8071 .get_sg = ethtool_op_get_sg,
8072 .set_sg = ethtool_op_set_sg,
8073 .get_tso = ethtool_op_get_tso,
8074 .set_tso = bnx2x_set_tso,
8075 .self_test_count = bnx2x_self_test_count,
8076 .self_test = bnx2x_self_test,
8077 .get_strings = bnx2x_get_strings,
8078 .phys_id = bnx2x_phys_id,
8079 .get_stats_count = bnx2x_get_stats_count,
8080 .get_ethtool_stats = bnx2x_get_ethtool_stats
8083 /* end of ethtool_ops */
8085 /****************************************************************************
8086 * General service functions
8087 ****************************************************************************/
/* Program the PCI power-management control register to move the device
 * between D0 and D3hot.  The switch on 'state' (case labels) is not
 * visible in this chunk; the visible paths write PMCSR and handle the
 * mandatory delay when leaving D3hot.
 */
8089 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
8093 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
/* enter D0: clear the power-state field, keep PME status write-1-clear */
8097 pci_write_config_word(bp->pdev,
8098 bp->pm_cap + PCI_PM_CTRL,
8099 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
8100 PCI_PM_CTRL_PME_STATUS));
8102 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
8103 /* delay required during transition out of D3hot */
8108 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
/* NOTE(review): WoL-conditional PME enable; the surrounding condition
 * is not visible here */
8112 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
8114 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
8117 /* No more memory access after this point until
8118 * device is brought back to D0.
8129 * net_device service functions
8132 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
8133 * from set_multicast.
/* Configure RX filtering from dev->flags and the multicast list:
 * promiscuous, all-multi (also used when the list exceeds the CAM
 * capacity), or an explicit per-address CAM programming via a slowpath
 * ramrod.  Called with rtnl_lock from vlan functions and with
 * netif_tx_lock from set_multicast (per the comment preceding this
 * function in the original file).
 */
8135 static void bnx2x_set_rx_mode(struct net_device *dev)
8137 struct bnx2x *bp = netdev_priv(dev);
8138 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8140 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
8142 if (dev->flags & IFF_PROMISC)
8143 rx_mode = BNX2X_RX_MODE_PROMISC;
8145 else if ((dev->flags & IFF_ALLMULTI) ||
8146 (dev->mc_count > BNX2X_MAX_MULTICAST))
8147 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8149 else { /* some multicasts */
8151 struct dev_mc_list *mclist;
8152 struct mac_configuration_cmd *config =
8153 bnx2x_sp(bp, mcast_config);
/* fill one CAM entry per multicast address; MAC bytes are stored
 * as three byte-swapped u16 halves */
8155 for (i = 0, mclist = dev->mc_list;
8156 mclist && (i < dev->mc_count);
8157 i++, mclist = mclist->next) {
8159 config->config_table[i].cam_entry.msb_mac_addr =
8160 swab16(*(u16 *)&mclist->dmi_addr[0]);
8161 config->config_table[i].cam_entry.middle_mac_addr =
8162 swab16(*(u16 *)&mclist->dmi_addr[2]);
8163 config->config_table[i].cam_entry.lsb_mac_addr =
8164 swab16(*(u16 *)&mclist->dmi_addr[4]);
8165 config->config_table[i].cam_entry.flags =
8166 cpu_to_le16(bp->port);
8167 config->config_table[i].target_table_entry.flags = 0;
8168 config->config_table[i].target_table_entry.
8170 config->config_table[i].target_table_entry.
8174 "setting MCAST[%d] (%04x:%04x:%04x)\n",
8175 i, config->config_table[i].cam_entry.msb_mac_addr,
8176 config->config_table[i].cam_entry.middle_mac_addr,
8177 config->config_table[i].cam_entry.lsb_mac_addr);
/* invalidate any leftover entries from a previous, longer list */
8179 old = config->hdr.length_6b;
8181 for (; i < old; i++) {
8182 if (CAM_IS_INVALID(config->config_table[i])) {
8183 i--; /* already invalidated */
8187 CAM_INVALIDATE(config->config_table[i]);
/* per-port CAM window; emulation uses a smaller table */
8191 if (CHIP_REV_IS_SLOW(bp))
8192 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
8194 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
8196 config->hdr.length_6b = i;
8197 config->hdr.offset = offset;
8198 config->hdr.reserved0 = 0;
8199 config->hdr.reserved1 = 0;
/* hand the table to firmware via a slowpath SET_MAC ramrod */
8201 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8202 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8203 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8206 bp->rx_mode = rx_mode;
8207 bnx2x_set_storm_rx_mode(bp);
/* NAPI poll handler for one fastpath ring: reap TX completions, process
 * up to 'budget' RX packets, and re-enable the status-block interrupt
 * only when all work is done.  Returns the number of RX packets handled
 * (return statements are not visible in this chunk).
 */
8210 static int bnx2x_poll(struct napi_struct *napi, int budget)
8212 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
8214 struct bnx2x *bp = fp->bp;
8217 #ifdef BNX2X_STOP_ON_ERROR
8218 if (unlikely(bp->panic))
/* warm the cache lines we are about to touch */
8222 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
8223 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
8224 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
8226 bnx2x_update_fpsb_idx(fp);
8228 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
8229 bnx2x_tx_int(fp, budget);
8232 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
8233 work_done = bnx2x_rx_int(fp, budget);
8236 rmb(); /* bnx2x_has_work() reads the status block */
8238 /* must not complete if we consumed full budget */
8239 if ((work_done < budget) && !bnx2x_has_work(fp)) {
8241 #ifdef BNX2X_STOP_ON_ERROR
8244 netif_rx_complete(bp->dev, napi);
/* ack both storms; only the last ack re-enables the interrupt */
8246 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
8247 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
8248 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
8249 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
8255 /* Called with netif_tx_lock.
8256 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
8257 * netif_wake_queue().
/* Transmit entry point (dev->hard_start_xmit).  Builds the BD chain for
 * one skb: a start BD, an optional parsing BD for checksum/TSO offload,
 * an optional split-header data BD for TSO, and one BD per page fragment;
 * then rings the doorbell.  Called with netif_tx_lock held (per the
 * comment preceding this function in the original file).
 */
8259 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8261 struct bnx2x *bp = netdev_priv(dev);
8262 struct bnx2x_fastpath *fp;
8263 struct sw_tx_bd *tx_buf;
8264 struct eth_tx_bd *tx_bd;
8265 struct eth_tx_parse_bd *pbd = NULL;
8266 u16 pkt_prod, bd_prod;
8267 int nbd, fp_index = 0;
8270 #ifdef BNX2X_STOP_ON_ERROR
8271 if (unlikely(bp->panic))
8272 return NETDEV_TX_BUSY;
/* spread packets across queues by the submitting CPU */
8275 fp_index = smp_processor_id() % (bp->num_queues);
8277 fp = &bp->fp[fp_index];
/* NOTE(review): this checks bp->fp (queue 0), not the selected fp —
 * looks like a bug for num_queues > 1; verify against full source */
8278 if (unlikely(bnx2x_tx_avail(bp->fp) <
8279 (skb_shinfo(skb)->nr_frags + 3))) {
/* NOTE(review): trailing comma (comma operator) — presumably meant
 * to be a semicolon */
8280 bp->slowpath->eth_stats.driver_xoff++,
8281 netif_stop_queue(dev);
8282 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
8283 return NETDEV_TX_BUSY;
8287 This is a bit ugly. First we use one BD which we mark as start,
8288 then for TSO or xsum we have a parsing info BD,
8289 and only then we have the rest of the TSO bds.
8290 (don't forget to mark the last one as last,
8291 and to unmap only AFTER you write to the BD ...)
8292 I would like to thank DovH for this mess.
8295 pkt_prod = fp->tx_pkt_prod++;
8296 bd_prod = fp->tx_bd_prod;
8297 bd_prod = TX_BD(bd_prod);
8299 /* get a tx_buff and first bd */
8300 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8301 tx_bd = &fp->tx_desc_ring[bd_prod];
8303 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
8304 tx_bd->general_data = (UNICAST_ADDRESS <<
8305 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
8306 tx_bd->general_data |= 1; /* header nbd */
8308 /* remember the first bd of the packet */
8309 tx_buf->first_bd = bd_prod;
8311 DP(NETIF_MSG_TX_QUEUED,
8312 "sending pkt %u @%p next_idx %u bd %u @%p\n",
8313 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
/* checksum offload requested: emit a parsing BD describing the headers */
8315 if (skb->ip_summed == CHECKSUM_PARTIAL) {
8316 struct iphdr *iph = ip_hdr(skb);
8319 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
8321 /* turn on parsing and get a bd */
8322 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8323 pbd = (void *)&fp->tx_desc_ring[bd_prod];
/* header lengths are expressed in 16-bit words for the firmware */
8324 len = ((u8 *)iph - (u8 *)skb->data) / 2;
8326 /* for now NS flag is not used in Linux */
8327 pbd->global_data = (len |
8328 ((skb->protocol == ETH_P_8021Q) <<
8329 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
8330 pbd->ip_hlen = ip_hdrlen(skb) / 2;
8331 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
8332 if (iph->protocol == IPPROTO_TCP) {
8333 struct tcphdr *th = tcp_hdr(skb);
8335 tx_bd->bd_flags.as_bitfield |=
8336 ETH_TX_BD_FLAGS_TCP_CSUM;
8337 pbd->tcp_flags = htonl(tcp_flag_word(skb)) & 0xFFFF;
8338 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
8339 pbd->tcp_pseudo_csum = swab16(th->check);
8341 } else if (iph->protocol == IPPROTO_UDP) {
8342 struct udphdr *uh = udp_hdr(skb);
8344 tx_bd->bd_flags.as_bitfield |=
8345 ETH_TX_BD_FLAGS_TCP_CSUM;
8346 pbd->total_hlen += cpu_to_le16(4);
8347 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
8348 pbd->cs_offset = 5; /* 10 >> 1 */
8349 pbd->tcp_pseudo_csum = 0;
8350 /* HW bug: we need to subtract 10 bytes before the
8351 * UDP header from the csum
8353 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
8354 csum_partial(((u8 *)(uh)-10), 10, 0)));
/* VLAN tag if accel is active, otherwise reuse the field as a
 * producer cookie */
8358 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
8359 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
8360 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
8362 tx_bd->vlan = cpu_to_le16(pkt_prod);
/* map the linear part of the skb for DMA */
8365 mapping = pci_map_single(bp->pdev, skb->data,
8366 skb->len, PCI_DMA_TODEVICE);
8368 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8369 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8370 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
8371 tx_bd->nbd = cpu_to_le16(nbd);
8372 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8374 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
8375 " nbytes %d flags %x vlan %u\n",
8376 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
8377 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
/* TSO path: split headers from payload if they share the first BD */
8379 if (skb_shinfo(skb)->gso_size &&
8380 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
8381 int hlen = 2 * le32_to_cpu(pbd->total_hlen);
8383 DP(NETIF_MSG_TX_QUEUED,
8384 "TSO packet len %d hlen %d total len %d tso size %d\n",
8385 skb->len, hlen, skb_headlen(skb),
8386 skb_shinfo(skb)->gso_size);
8388 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
8390 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
8391 /* we split the first bd into headers and data bds
8392 * to ease the pain of our fellow micocode engineers
8393 * we use one mapping for both bds
8394 * So far this has only been observed to happen
8395 * in Other Operating Systems(TM)
8398 /* first fix first bd */
8400 tx_bd->nbd = cpu_to_le16(nbd);
8401 tx_bd->nbytes = cpu_to_le16(hlen);
8403 /* we only print this as an error
8404 * because we don't think this will ever happen.
8406 BNX2X_ERR("TSO split header size is %d (%x:%x)"
8407 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
8408 tx_bd->addr_lo, tx_bd->nbd);
8410 /* now get a new data bd
8411 * (after the pbd) and fill it */
8412 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8413 tx_bd = &fp->tx_desc_ring[bd_prod];
8415 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8416 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
8417 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
8418 tx_bd->vlan = cpu_to_le16(pkt_prod);
8419 /* this marks the bd
8420 * as one that has no individual mapping
8421 * the FW ignores this flag in a bd not marked start
8423 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
8424 DP(NETIF_MSG_TX_QUEUED,
8425 "TSO split data size is %d (%x:%x)\n",
8426 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
8430 /* supposed to be unreached
8431 * (and therefore not handled properly...)
8433 BNX2X_ERR("LSO with no PBD\n");
/* fill the TSO fields of the parsing BD */
8437 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
8438 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
8439 pbd->ip_id = swab16(ip_hdr(skb)->id);
8440 pbd->tcp_pseudo_csum =
8441 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
8443 0, IPPROTO_TCP, 0));
8444 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
/* one BD per page fragment */
8450 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
8451 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8453 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8454 tx_bd = &fp->tx_desc_ring[bd_prod];
8456 mapping = pci_map_page(bp->pdev, frag->page,
8458 frag->size, PCI_DMA_TODEVICE);
8460 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8461 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8462 tx_bd->nbytes = cpu_to_le16(frag->size);
8463 tx_bd->vlan = cpu_to_le16(pkt_prod);
8464 tx_bd->bd_flags.as_bitfield = 0;
8465 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
8466 " addr (%x:%x) nbytes %d flags %x\n",
8467 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
8468 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
8472 /* now at last mark the bd as the last bd */
8473 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
8475 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
8476 tx_bd, tx_bd->bd_flags.as_bitfield);
8480 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8482 /* now send a tx doorbell, counting the next bd
8483 * if the packet contains or ends with it
8485 if (TX_BD_POFF(bd_prod) < nbd)
8489 DP(NETIF_MSG_TX_QUEUED,
8490 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
8491 " tcp_flags %x xsum %x seq %u hlen %u\n",
8492 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
8493 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
8494 pbd->tcp_send_seq, pbd->total_hlen);
8496 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
/* publish producers to the firmware and ring the doorbell */
8498 fp->hw_tx_prods->bds_prod += cpu_to_le16(nbd);
8499 mb(); /* FW restriction: must not reorder writing nbd and packets */
8500 fp->hw_tx_prods->packets_prod += cpu_to_le32(1);
8501 DOORBELL(bp, fp_index, 0);
8505 fp->tx_bd_prod = bd_prod;
8506 dev->trans_start = jiffies;
/* stop the queue when nearly full; re-wake if completions raced us */
8508 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
8509 netif_stop_queue(dev);
8510 bp->slowpath->eth_stats.driver_xoff++;
8511 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
8512 netif_wake_queue(dev);
8516 return NETDEV_TX_OK;
/* dev->get_stats hook; the body is not visible in this chunk. */
8519 static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
8524 /* Called with rtnl_lock */
8525 static int bnx2x_open(struct net_device *dev)
8527 struct bnx2x *bp = netdev_priv(dev);
8529 bnx2x_set_power_state(bp, PCI_D0);
8531 return bnx2x_nic_load(bp, 1);
8534 /* Called with rtnl_lock */
/* dev->stop hook: unload the NIC (releasing IRQs) and drop the chip to
 * D3hot.  An unload failure is logged but does not abort the power-down.
 */
8535 static int bnx2x_close(struct net_device *dev)
8538 struct bnx2x *bp = netdev_priv(dev);
8540 /* Unload the driver, release IRQs */
8541 rc = bnx2x_nic_unload(bp, 1);
8543 BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
8546 bnx2x_set_power_state(bp, PCI_D3hot);
8551 /* Called with rtnl_lock */
/* dev->set_mac_address hook: validate the new address, store it in the
 * net_device, and reprogram the hardware MAC if the interface is up.
 */
8552 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
8554 struct sockaddr *addr = p;
8555 struct bnx2x *bp = netdev_priv(dev);
8557 if (!is_valid_ether_addr(addr->sa_data))
8560 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8561 if (netif_running(dev))
8562 bnx2x_set_mac_addr(bp);
8567 /* Called with rtnl_lock */
/* dev->do_ioctl hook implementing the MII ioctls: report the PHY
 * address, and read/write PHY registers under phy_lock while the
 * interface is open.  Writes require CAP_NET_ADMIN.  (The switch on
 * 'cmd' — SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG case labels — is not
 * visible in this chunk.)
 */
8568 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8570 struct mii_ioctl_data *data = if_mii(ifr);
8571 struct bnx2x *bp = netdev_priv(dev);
8576 data->phy_id = bp->phy_addr;
/* MII register read */
8582 spin_lock_bh(&bp->phy_lock);
8583 if (bp->state == BNX2X_STATE_OPEN) {
8584 err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
8587 data->val_out = mii_regval;
8591 spin_unlock_bh(&bp->phy_lock);
/* MII register write: privileged */
8596 if (!capable(CAP_NET_ADMIN))
8599 spin_lock_bh(&bp->phy_lock);
8600 if (bp->state == BNX2X_STATE_OPEN) {
8601 err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
8606 spin_unlock_bh(&bp->phy_lock);
8617 /* Called with rtnl_lock */
/* dev->change_mtu hook: bounds-check the new MTU and reload the NIC so
 * the new buffer sizing takes effect if the interface is running.
 */
8618 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8620 struct bnx2x *bp = netdev_priv(dev);
8622 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
8623 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
8626 /* This does not race with packet allocation
8627 * because the actual alloc size is
8628 * only updated as part of load
8632 if (netif_running(dev)) {
8633 bnx2x_nic_unload(bp, 0);
8634 bnx2x_nic_load(bp, 0);
/* dev->tx_timeout hook: defer recovery to the reset task so the netif
 * can be shut down gracefully before the chip reset (skipped when
 * BNX2X_STOP_ON_ERROR is set to preserve debug state).
 */
8639 static void bnx2x_tx_timeout(struct net_device *dev)
8641 struct bnx2x *bp = netdev_priv(dev);
8643 #ifdef BNX2X_STOP_ON_ERROR
8647 /* This allows the netif to be shutdown gracefully before resetting */
8648 schedule_work(&bp->reset_task);
8652 /* Called with rtnl_lock */
/* VLAN acceleration registration hook: remember the vlan group (the
 * assignment line is not visible in this chunk) and refresh the RX mode
 * if the interface is up.
 */
8653 static void bnx2x_vlan_rx_register(struct net_device *dev,
8654 struct vlan_group *vlgrp)
8656 struct bnx2x *bp = netdev_priv(dev);
8659 if (netif_running(dev))
8660 bnx2x_set_rx_mode(dev);
8664 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8665 static void poll_bnx2x(struct net_device *dev)
8667 struct bnx2x *bp = netdev_priv(dev);
8669 disable_irq(bp->pdev->irq);
8670 bnx2x_interrupt(bp->pdev->irq, dev);
8671 enable_irq(bp->pdev->irq);
/* Workqueue handler scheduled from bnx2x_tx_timeout(): stop the netif,
 * then unload and reload the NIC.  in_reset_task flags the operation so
 * other paths can detect the reset in progress.  Under
 * BNX2X_STOP_ON_ERROR the reset is skipped to preserve debug state.
 */
8675 static void bnx2x_reset_task(struct work_struct *work)
8677 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
8679 #ifdef BNX2X_STOP_ON_ERROR
8680 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8681 " so reset not done to allow debug dump,\n"
8682 KERN_ERR " you will need to reboot when done\n");
8686 if (!netif_running(bp->dev))
8689 bp->in_reset_task = 1;
8691 bnx2x_netif_stop(bp);
8693 bnx2x_nic_unload(bp, 0);
8694 bnx2x_nic_load(bp, 0);
8696 bp->in_reset_task = 0;
/* Probe-time board setup: enable the PCI device, claim its two memory
 * BARs, find the PM and PCIe capabilities, configure DMA masks, map the
 * register and doorbell windows, read HW info, and initialize driver
 * defaults (ring sizes, coalescing, timer).  Error paths unwind via the
 * labels at the bottom.  NOTE(review): many lines (returns, labels,
 * some assignments) are missing from this chunk.
 */
8699 static int __devinit bnx2x_init_board(struct pci_dev *pdev,
8700 struct net_device *dev)
8705 SET_NETDEV_DEV(dev, &pdev->dev);
8706 bp = netdev_priv(dev);
/* port index is derived from the PCI function number */
8709 bp->port = PCI_FUNC(pdev->devfn);
8711 rc = pci_enable_device(pdev);
8713 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
8717 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8718 printk(KERN_ERR PFX "Cannot find PCI device base address,"
8721 goto err_out_disable;
8724 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8725 printk(KERN_ERR PFX "Cannot find second PCI device"
8726 " base address, aborting\n");
8728 goto err_out_disable;
8731 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8733 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
8735 goto err_out_disable;
8738 pci_set_master(pdev);
8740 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8741 if (bp->pm_cap == 0) {
8742 printk(KERN_ERR PFX "Cannot find power management"
8743 " capability, aborting\n");
8745 goto err_out_release;
8748 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8749 if (bp->pcie_cap == 0) {
8750 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
8753 goto err_out_release;
/* prefer 64-bit DMA; fall back to 32-bit */
8756 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
8757 bp->flags |= USING_DAC_FLAG;
8758 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
8759 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
8760 " failed, aborting\n");
8762 goto err_out_release;
8765 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
8766 printk(KERN_ERR PFX "System does not support DMA,"
8769 goto err_out_release;
8775 spin_lock_init(&bp->phy_lock);
8777 bp->in_reset_task = 0;
8779 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8780 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
8782 dev->base_addr = pci_resource_start(pdev, 0);
8784 dev->irq = pdev->irq;
/* BAR0: register window; BAR2: doorbell window */
8786 bp->regview = ioremap_nocache(dev->base_addr,
8787 pci_resource_len(pdev, 0));
8789 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
8791 goto err_out_release;
8794 bp->doorbells = ioremap_nocache(pci_resource_start(pdev , 2),
8795 pci_resource_len(pdev, 2));
8796 if (!bp->doorbells) {
8797 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
8802 bnx2x_set_power_state(bp, PCI_D0);
8804 bnx2x_get_hwinfo(bp);
8806 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
8807 printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
8808 " will only init first device\n");
8814 printk(KERN_ERR PFX "MCP disabled, will only"
8815 " init first device\n");
8819 if (onefunc && bp->port) {
8820 printk(KERN_ERR PFX "Second device disabled, exiting\n");
/* driver defaults: maximum ring sizes and interrupt coalescing */
8825 bp->tx_ring_size = MAX_TX_AVAIL;
8826 bp->rx_ring_size = MAX_RX_AVAIL;
8832 bp->tx_quick_cons_trip_int = 0xff;
8833 bp->tx_quick_cons_trip = 0xff;
8834 bp->tx_ticks_int = 50;
8837 bp->rx_quick_cons_trip_int = 0xff;
8838 bp->rx_quick_cons_trip = 0xff;
8839 bp->rx_ticks_int = 25;
8842 bp->stats_ticks = 1000000 & 0xffff00;
8844 bp->timer_interval = HZ;
8845 bp->current_interval = (poll ? poll : HZ);
8847 init_timer(&bp->timer);
8848 bp->timer.expires = jiffies + bp->current_interval;
8849 bp->timer.data = (unsigned long) bp;
8850 bp->timer.function = bnx2x_timer;
/* error unwind: unmap windows, release regions, disable the device */
8856 iounmap(bp->regview);
8860 if (bp->doorbells) {
8861 iounmap(bp->doorbells);
8862 bp->doorbells = NULL;
8866 pci_release_regions(pdev);
8869 pci_disable_device(pdev);
8870 pci_set_drvdata(pdev, NULL);
/* PCI probe entry point: allocate the net_device, run board init, wire
 * up the net_device ops / ethtool ops / feature flags, register the
 * netdev, and print an identification banner.
 * NOTE(review): several lines (error returns, closing braces) are
 * missing from this chunk.
 */
8876 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8877 const struct pci_device_id *ent)
8879 static int version_printed;
8880 struct net_device *dev = NULL;
8883 int port = PCI_FUNC(pdev->devfn);
/* print the driver banner once for the first probed device */
8885 if (version_printed++ == 0)
8886 printk(KERN_INFO "%s", version);
8888 /* dev zeroed in init_etherdev */
8889 dev = alloc_etherdev(sizeof(*bp));
8893 netif_carrier_off(dev);
8895 bp = netdev_priv(dev);
8896 bp->msglevel = debug;
8898 if (port && onefunc) {
8899 printk(KERN_ERR PFX "second function disabled. exiting\n");
8903 rc = bnx2x_init_board(pdev, dev);
/* hook up the net_device operations */
8909 dev->hard_start_xmit = bnx2x_start_xmit;
8910 dev->watchdog_timeo = TX_TIMEOUT;
8912 dev->get_stats = bnx2x_get_stats;
8913 dev->ethtool_ops = &bnx2x_ethtool_ops;
8914 dev->open = bnx2x_open;
8915 dev->stop = bnx2x_close;
8916 dev->set_multicast_list = bnx2x_set_rx_mode;
8917 dev->set_mac_address = bnx2x_change_mac_addr;
8918 dev->do_ioctl = bnx2x_ioctl;
8919 dev->change_mtu = bnx2x_change_mtu;
8920 dev->tx_timeout = bnx2x_tx_timeout;
8922 dev->vlan_rx_register = bnx2x_vlan_rx_register;
8924 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8925 dev->poll_controller = poll_bnx2x;
/* offload feature flags; HIGHDMA only when 64-bit DMA succeeded */
8927 dev->features |= NETIF_F_SG;
8928 if (bp->flags & USING_DAC_FLAG)
8929 dev->features |= NETIF_F_HIGHDMA;
8930 dev->features |= NETIF_F_IP_CSUM;
8932 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8934 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8936 rc = register_netdev(dev);
8938 dev_err(&pdev->dev, "Cannot register net device\n");
/* registration failed: undo everything bnx2x_init_board set up */
8940 iounmap(bp->regview);
8942 iounmap(bp->doorbells);
8943 pci_release_regions(pdev);
8944 pci_disable_device(pdev);
8945 pci_set_drvdata(pdev, NULL);
8950 pci_set_drvdata(pdev, dev);
8952 bp->name = board_info[ent->driver_data].name;
8953 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz "
8954 "found at mem %lx, IRQ %d, ",
8955 dev->name, bp->name,
8956 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8957 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8958 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
8959 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
8964 printk("node addr ");
8965 for (i = 0; i < 6; i++)
8966 printk("%2.2x", dev->dev_addr[i]);
/*
 * bnx2x_remove_one - PCI remove callback.
 * Unregisters the netdev and releases every resource acquired at probe
 * time (mappings, PCI regions, drvdata).
 *
 * NOTE(review): elided listing -- the gaps in the embedded line numbers
 * suggest NULL checks around the iounmap() calls that are not visible
 * here.
 */
8972 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8974 struct net_device *dev = pci_get_drvdata(pdev);
8975 struct bnx2x *bp = netdev_priv(dev);
/* make sure no deferred work is still in flight before teardown */
8977 flush_scheduled_work();
8978 /*tasklet_kill(&bp->sp_task);*/
8979 unregister_netdev(dev);
8982 iounmap(bp->regview);
8985 iounmap(bp->doorbells);
8988 pci_release_regions(pdev);
8989 pci_disable_device(pdev);
8990 pci_set_drvdata(pdev, NULL);
/*
 * bnx2x_suspend - PCI power-management suspend hook.
 * Unloads the NIC, detaches the netdev, saves PCI config space and drops
 * the chip into the power state chosen by the PCI core.
 * (Early-return for a non-running interface and the final return are in
 * elided lines.)
 */
8993 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
8995 struct net_device *dev = pci_get_drvdata(pdev);
8996 struct bnx2x *bp = netdev_priv(dev);
/* nothing to quiesce if the interface is down */
8999 if (!netif_running(dev))
9002 rc = bnx2x_nic_unload(bp, 0);
9006 netif_device_detach(dev);
9007 pci_save_state(pdev);
/* let the PCI core pick the target D-state for this pm_message */
9009 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
/*
 * bnx2x_resume - PCI power-management resume hook.
 * Mirrors bnx2x_suspend(): restores PCI config space, powers the chip
 * back to D0, re-attaches the netdev and reloads the NIC.
 * (Early-return for a non-running interface and the final return are in
 * elided lines.)
 */
9013 static int bnx2x_resume(struct pci_dev *pdev)
9015 struct net_device *dev = pci_get_drvdata(pdev);
9016 struct bnx2x *bp = netdev_priv(dev);
/* interface was down at suspend time: nothing to restart */
9019 if (!netif_running(dev))
9022 pci_restore_state(pdev);
9024 bnx2x_set_power_state(bp, PCI_D0);
9025 netif_device_attach(dev);
9027 rc = bnx2x_nic_load(bp, 0);
/* PCI driver descriptor tying the probe/remove/PM callbacks above to the
 * device IDs in bnx2x_pci_tbl (closing brace is in an elided line). */
9034 static struct pci_driver bnx2x_pci_driver = {
9035 .name = DRV_MODULE_NAME,
9036 .id_table = bnx2x_pci_tbl,
9037 .probe = bnx2x_init_one,
9038 .remove = __devexit_p(bnx2x_remove_one),
9039 .suspend = bnx2x_suspend,
9040 .resume = bnx2x_resume,
/* Module init: register the PCI driver; probe runs per matching device. */
9043 static int __init bnx2x_init(void)
9045 return pci_register_driver(&bnx2x_pci_driver);
/* Module exit: unregister the driver; remove runs per bound device. */
9048 static void __exit bnx2x_cleanup(void)
9050 pci_unregister_driver(&bnx2x_pci_driver);
/* Register the module entry/exit points with the kernel. */
9053 module_init(bnx2x_init);
9054 module_exit(bnx2x_cleanup);