/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_vfpf.h"
#include "bnx2x_sriov.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

#define INT_MODE_INTx		1
#define INT_MODE_MSI		2
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
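
/* Illustrative usage only (not part of the driver): these parameters are
 * supplied at module load time, e.g.
 *
 *	modprobe bnx2x num_queues=4 disable_tpa=1 int_mode=2
 *
 * The values shown are hypothetical; defaults apply when a parameter is
 * omitted.
 */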
struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0, BCM57711, BCM57711E,
	BCM57712, BCM57712_MF, BCM57712_VF,
	BCM57800, BCM57800_MF, BCM57800_VF,
	BCM57810, BCM57810_MF, BCM57810_VF,
	BCM57840_4_10, BCM57840_2_20, BCM57840_MF, BCM57840_VF,
	BCM57811, BCM57811_MF,
	BCM57840_O, BCM57840_MFO, BCM57811_VF
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] = {
	[BCM57710]	= { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	[BCM57711]	= { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	[BCM57711E]	= { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	[BCM57712]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	[BCM57712_MF]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	[BCM57712_VF]	= { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
	[BCM57800]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	[BCM57800_MF]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	[BCM57800_VF]	= { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
	[BCM57810]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	[BCM57810_MF]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	[BCM57810_VF]	= { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
	[BCM57840_4_10]	= { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	[BCM57840_2_20]	= { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	[BCM57840_MF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57840_VF]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
	[BCM57811]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
	[BCM57811_MF]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
	[BCM57840_O]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	[BCM57840_MFO]	= { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
	[BCM57811_VF]	= { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
};
#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/****************************************************************************
* General service functions
****************************************************************************/
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
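
/* Worked example (illustrative): __storm_memset_dma_mapping() splits a
 * 64-bit DMA address into two 32-bit register writes.  For a hypothetical
 * mapping of 0x0000000812345678 at address A it performs:
 *
 *	REG_WR(bp, A,     0x12345678);	- U64_LO(), low dword
 *	REG_WR(bp, A + 4, 0x00000008);	- U64_HI(), high dword
 */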
static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
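
/* Usage sketch (illustrative): the indirect accessors above tunnel GRC
 * register accesses through the PCI config-space window, so they work
 * before the BARs are usable.  A hypothetical early-init read-modify-write
 * would look like:
 *
 *	u32 v = bnx2x_reg_rd_ind(bp, reg);	- reg is a GRC offset
 *	bnx2x_reg_wr_ind(bp, reg, v | some_bit);
 *
 * Note that the window register is parked back at PCICFG_VENDOR_ID_OFFSET
 * after every access.
 */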
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
			   "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
			       struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	/*
	 * Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {

		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}
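
/* Usage sketch (illustrative): a one-off DMAE copy from host memory to
 * GRC composes the three helpers above.  Field values here are
 * hypothetical:
 *
 *	struct dmae_command dmae;
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(dma_addr);
 *	dmae.src_addr_hi = U64_HI(dma_addr);
 *	dmae.dst_addr_lo = grc_addr >> 2;	- GRC addresses are in dwords
 *	dmae.len = len32;
 *	rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
 *
 * bnx2x_write_dmae() below is exactly this pattern.
 */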
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
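
/* Worked example (illustrative): with a hypothetical DMAE_LEN32_WR_MAX
 * of 0x400 dwords and len = 0x900 dwords, the loop above issues:
 *
 *	write 0x400 dwords at byte offset 0x0000
 *	write 0x400 dwords at byte offset 0x1000
 *	write 0x100 dwords at byte offset 0x2000	- the tail
 *
 * i.e. 'offset' advances in bytes (dwords * 4) while 'len' counts dwords.
 */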
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);

	/* dump buffer after the mark */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}

	/* dump buffer before the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indexes data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}
#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
/* bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * initialization.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}
static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
					   char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}
static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}
static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}
#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
				  u32 poll_cnt)
{
	struct sdm_op_gen op_gen = {0};

	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		bnx2x_panic();
		return 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}
u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}

/* PF FLR specific routines
 */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}
static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}
static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* ATC cleanup */

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	/* Debug */
	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100000;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	mmiowb();
	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100000;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */
/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}
/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}
/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp: driver handle
 *
 * Tries to acquire a leader lock for current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
static int bnx2x_schedule_sp_task(struct bnx2x *bp)
{
	/* Set the interrupt occurred bit for the sp-task to recognize it
	 * must ack the interrupt and transition according to the IGU
	 * state machine.
	 */
	atomic_set(&bp->interrupt_occurred, 1);

	/* The sp_task must execute only after this bit
	 * is set, otherwise we will get out of sync and miss all
	 * further interrupts. Hence, the barrier.
	 */
	smp_wmb();

	/* schedule sp_task to workqueue */
	return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
}
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	/* If cid is within VF range, replace the slowpath object with the
	 * one corresponding to this VF
	 */
	if (cid >= BNX2X_FIRST_VF_CID &&
	    cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
		bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command the first
		 * place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	/* SRIOV: reschedule any 'in_progress' operations */
	bnx2x_iov_sp_event(bp, cid, true);

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit.
		 * prevent case that both bits are cleared.
		 * At the end of load/unload driver checks that
		 * sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule the sp task as mcp ack is required */
		bnx2x_schedule_sp_task(bp);
	}

	return;
}
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			if (likely(bp->state == BNX2X_STATE_OPEN)) {
				c_ops = rcu_dereference(bp->cnic_ops);
				if (c_ops)
					c_ops->cnic_handler(bp->cnic_data,
							    NULL);
			}
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {

		/* schedule sp task to perform default status block work, ack
		 * attentions and enable interrupts.
		 */
		bnx2x_schedule_sp_task(bp);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
/*
 * General service functions
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}
int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
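
/* Usage sketch (illustrative): HW lock resources arbitrate hardware
 * agents shared between PCI functions (GPIO, SPIO, MDIO, ...).  The
 * expected pairing is:
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (!rc) {
 *		... program the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * bnx2x_set_gpio() below follows exactly this pattern.
 */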
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
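
/* Illustrative example: driving a board pin low and then floating it
 * again.  The GPIO number and port are hypothetical:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
 *	msleep(10);
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
 */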
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller. */

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
	u32 spio_reg;

	/* Only 2 SPIOs are configurable */
	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

	switch (mode) {
	case MISC_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_CLR_POS);
		break;

	case MISC_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_SET_POS);
		break;

	case MISC_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
		spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
2172 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2174 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2175 switch (bp->link_vars.ieee_fc &
2176 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2177 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2178 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2182 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2183 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2187 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2188 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2192 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2198 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2200 /* Initialize link parameters structure variables
2201 * It is recommended to turn off RX FC for jumbo frames
2202 * for better performance
2204 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2205 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2207 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2210 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2212 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2213 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2215 if (!BP_NOMCP(bp)) {
2216 bnx2x_set_requested_fc(bp);
2217 bnx2x_acquire_phy_lock(bp);
2219 if (load_mode == LOAD_DIAG) {
2220 struct link_params *lp = &bp->link_params;
2221 lp->loopback_mode = LOOPBACK_XGXS;
2222 /* do PHY loopback at 10G speed, if possible */
2223 if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2224 if (lp->speed_cap_mask[cfx_idx] &
2225 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2226 lp->req_line_speed[cfx_idx] =
2229 lp->req_line_speed[cfx_idx] =
2234 if (load_mode == LOAD_LOOPBACK_EXT) {
2235 struct link_params *lp = &bp->link_params;
2236 lp->loopback_mode = LOOPBACK_EXT;
2239 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2241 bnx2x_release_phy_lock(bp);
2243 bnx2x_calc_fc_adv(bp);
2245 if (bp->link_vars.link_up) {
2246 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2247 bnx2x_link_report(bp);
2249 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2250 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2253 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2257 void bnx2x_link_set(struct bnx2x *bp)
2259 if (!BP_NOMCP(bp)) {
2260 bnx2x_acquire_phy_lock(bp);
2261 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2262 bnx2x_release_phy_lock(bp);
2264 bnx2x_calc_fc_adv(bp);
2266 BNX2X_ERR("Bootcode is missing - can not set link\n");
2269 static void bnx2x__link_reset(struct bnx2x *bp)
2271 if (!BP_NOMCP(bp)) {
2272 bnx2x_acquire_phy_lock(bp);
2273 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2274 bnx2x_release_phy_lock(bp);
2276 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2279 void bnx2x_force_link_reset(struct bnx2x *bp)
2281 bnx2x_acquire_phy_lock(bp);
2282 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2283 bnx2x_release_phy_lock(bp);
2286 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2290 if (!BP_NOMCP(bp)) {
2291 bnx2x_acquire_phy_lock(bp);
2292 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2294 bnx2x_release_phy_lock(bp);
2296 BNX2X_ERR("Bootcode is missing - can not test link\n");
2302 /* Calculates the sum of vn_min_rates.
2303 It's needed for further normalizing of the min_rates.
2305 Returns: the sum of vn_min_rates,
2307 or 0 - if all the min_rates are 0.
2308 In the latter case the fairness algorithm should be deactivated.
2309 If not all min_rates are zero then those that are zero will be set to 1. */
2311 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2312 struct cmng_init_input *input)
2317 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2318 u32 vn_cfg = bp->mf_config[vn];
2319 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2320 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2322 /* Skip hidden vns */
2323 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2325 /* If min rate is zero - set it to 1 */
2326 else if (!vn_min_rate)
2327 vn_min_rate = DEF_MIN_RATE;
2331 input->vnic_min_rate[vn] = vn_min_rate;
2334 /* if ETS is enabled or all min rates are zero - disable fairness */
2335 if (BNX2X_IS_ETS_ENABLED(bp)) {
2336 input->flags.cmng_enables &=
2337 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2338 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2339 } else if (all_zero) {
2340 input->flags.cmng_enables &=
2341 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2343 "All MIN values are zeroes fairness will be disabled\n");
2345 input->flags.cmng_enables |=
2346 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2349 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2350 struct cmng_init_input *input)
2353 u32 vn_cfg = bp->mf_config[vn];
2355 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2358 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2361 /* maxCfg is in percent of the link speed */
2362 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2363 } else /* SD modes */
2364 /* maxCfg is absolute in 100Mb units */
2365 vn_max_rate = maxCfg * 100;
2368 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2370 input->vnic_max_rate[vn] = vn_max_rate;
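/* Worked example of the conversion above, assuming a 10G link
 * (line_speed == 10000 Mbps): maxCfg == 30 in SI mode yields
 * 10000 * 30 / 100 == 3000 Mbps, while in SD mode the same maxCfg is
 * absolute in 100Mb units and also yields 30 * 100 == 3000 Mbps.
 */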
2374 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2376 if (CHIP_REV_IS_SLOW(bp))
2377 return CMNG_FNS_NONE;
2379 return CMNG_FNS_MINMAX;
2381 return CMNG_FNS_NONE;
2384 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2386 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2389 return; /* what should be the default value in this case */
2391 /* For 2 port configuration the absolute function number formula
2393 * abs_func = 2 * vn + BP_PORT + BP_PATH
2395 * and there are 4 functions per port
2397 * For 4 port configuration it is
2398 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2400 * and there are 2 functions per port
2402 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2403 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2405 if (func >= E1H_FUNC_MAX)
2409 MF_CFG_RD(bp, func_mf_config[func].config);
2411 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2412 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2413 bp->flags |= MF_FUNC_DIS;
2415 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2416 bp->flags &= ~MF_FUNC_DIS;
2420 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2422 struct cmng_init_input input;
2423 memset(&input, 0, sizeof(struct cmng_init_input));
2425 input.port_rate = bp->link_vars.line_speed;
2427 if (cmng_type == CMNG_FNS_MINMAX) {
2430 /* read mf conf from shmem */
2432 bnx2x_read_mf_cfg(bp);
2434 /* calculate vn_weight_sum and enable fairness if not 0 */
2435 bnx2x_calc_vn_min(bp, &input);
2437 /* calculate and set min-max rate for each vn */
2439 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2440 bnx2x_calc_vn_max(bp, vn, &input);
2442 /* always enable rate shaping; fairness is set up in bnx2x_calc_vn_min() */
2443 input.flags.cmng_enables |=
2444 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2446 bnx2x_init_cmng(&input, &bp->cmng);
2450 /* rate shaping and fairness are disabled */
2452 "rate shaping and fairness are disabled\n");
2455 static void storm_memset_cmng(struct bnx2x *bp,
2456 struct cmng_init *cmng,
2460 size_t size = sizeof(struct cmng_struct_per_port);
2462 u32 addr = BAR_XSTRORM_INTMEM +
2463 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2465 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2467 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2468 int func = func_by_vn(bp, vn);
2470 addr = BAR_XSTRORM_INTMEM +
2471 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2472 size = sizeof(struct rate_shaping_vars_per_vn);
2473 __storm_memset_struct(bp, addr, size,
2474 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2476 addr = BAR_XSTRORM_INTMEM +
2477 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2478 size = sizeof(struct fairness_vars_per_vn);
2479 __storm_memset_struct(bp, addr, size,
2480 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2484 /* This function is called upon link interrupt */
2485 static void bnx2x_link_attn(struct bnx2x *bp)
2487 /* Make sure that we are synced with the current statistics */
2488 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2490 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2492 if (bp->link_vars.link_up) {
2494 /* dropless flow control */
2495 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2496 int port = BP_PORT(bp);
2497 u32 pause_enabled = 0;
2499 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2502 REG_WR(bp, BAR_USTRORM_INTMEM +
2503 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2507 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2508 struct host_port_stats *pstats;
2510 pstats = bnx2x_sp(bp, port_stats);
2511 /* reset old mac stats */
2512 memset(&(pstats->mac_stx[0]), 0,
2513 sizeof(struct mac_stx));
2515 if (bp->state == BNX2X_STATE_OPEN)
2516 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2519 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2520 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2522 if (cmng_fns != CMNG_FNS_NONE) {
2523 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2524 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2526 /* rate shaping and fairness are disabled */
2528 "single function mode without fairness\n");
2531 __bnx2x_link_report(bp);
2534 bnx2x_link_sync_notify(bp);
2537 void bnx2x__link_status_update(struct bnx2x *bp)
2539 if (bp->state != BNX2X_STATE_OPEN)
2542 /* read updated dcb configuration */
2544 bnx2x_dcbx_pmf_update(bp);
2545 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2546 if (bp->link_vars.link_up)
2547 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2549 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2550 /* indicate link status */
2551 bnx2x_link_report(bp);
2554 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2555 SUPPORTED_10baseT_Full |
2556 SUPPORTED_100baseT_Half |
2557 SUPPORTED_100baseT_Full |
2558 SUPPORTED_1000baseT_Full |
2559 SUPPORTED_2500baseX_Full |
2560 SUPPORTED_10000baseT_Full |
2565 SUPPORTED_Asym_Pause);
2566 bp->port.advertising[0] = bp->port.supported[0];
2568 bp->link_params.bp = bp;
2569 bp->link_params.port = BP_PORT(bp);
2570 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2571 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2572 bp->link_params.req_line_speed[0] = SPEED_10000;
2573 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2574 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2575 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2576 bp->link_vars.line_speed = SPEED_10000;
2577 bp->link_vars.link_status =
2578 (LINK_STATUS_LINK_UP |
2579 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2580 bp->link_vars.link_up = 1;
2581 bp->link_vars.duplex = DUPLEX_FULL;
2582 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2583 __bnx2x_link_report(bp);
2584 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2588 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2589 u16 vlan_val, u8 allowed_prio)
2591 struct bnx2x_func_state_params func_params = {0};
2592 struct bnx2x_func_afex_update_params *f_update_params =
2593 &func_params.params.afex_update;
2595 func_params.f_obj = &bp->func_obj;
2596 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2598 /* no need to wait for RAMROD completion, so don't
2599 * set RAMROD_COMP_WAIT flag
2602 f_update_params->vif_id = vifid;
2603 f_update_params->afex_default_vlan = vlan_val;
2604 f_update_params->allowed_priorities = allowed_prio;
2606 /* if the ramrod can not be sent, respond to the MCP immediately */
2607 if (bnx2x_func_state_change(bp, &func_params) < 0)
2608 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2613 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2614 u16 vif_index, u8 func_bit_map)
2616 struct bnx2x_func_state_params func_params = {0};
2617 struct bnx2x_func_afex_viflists_params *update_params =
2618 &func_params.params.afex_viflists;
2622 /* validate only LIST_SET and LIST_GET are received from switch */
2623 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2624 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2627 func_params.f_obj = &bp->func_obj;
2628 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2630 /* set parameters according to cmd_type */
2631 update_params->afex_vif_list_command = cmd_type;
2632 update_params->vif_list_index = cpu_to_le16(vif_index);
2633 update_params->func_bit_map =
2634 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2635 update_params->func_to_clear = 0;
2637 (cmd_type == VIF_LIST_RULE_GET) ?
2638 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2639 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2641 /* if the ramrod can not be sent, respond to the MCP immediately for
2642 * SET and GET requests (others are not triggered from the MCP) */
2644 rc = bnx2x_func_state_change(bp, &func_params);
2646 bnx2x_fw_command(bp, drv_msg_code, 0);
2651 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2653 struct afex_stats afex_stats;
2654 u32 func = BP_ABS_FUNC(bp);
2661 u32 addr_to_write, vifid, addrs, stats_type, i;
2663 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2664 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2666 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2667 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2670 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2671 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2672 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2674 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2676 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2680 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2681 addr_to_write = SHMEM2_RD(bp,
2682 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2683 stats_type = SHMEM2_RD(bp,
2684 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2687 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2690 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2692 /* write response to scratchpad, for MCP */
2693 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2694 REG_WR(bp, addr_to_write + i*sizeof(u32),
2695 *(((u32 *)(&afex_stats))+i));
2697 /* send ack message to MCP */
2698 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2701 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2702 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2703 bp->mf_config[BP_VN(bp)] = mf_config;
2705 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2708 /* if VIF_SET is "enabled" */
2709 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2710 /* set rate limit directly to internal RAM */
2711 struct cmng_init_input cmng_input;
2712 struct rate_shaping_vars_per_vn m_rs_vn;
2713 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2714 u32 addr = BAR_XSTRORM_INTMEM +
2715 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2717 bp->mf_config[BP_VN(bp)] = mf_config;
2719 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2720 m_rs_vn.vn_counter.rate =
2721 cmng_input.vnic_max_rate[BP_VN(bp)];
2722 m_rs_vn.vn_counter.quota =
2723 (m_rs_vn.vn_counter.rate *
2724 RS_PERIODIC_TIMEOUT_USEC) / 8;
2726 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2728 /* read relevant values from mf_cfg struct in shmem */
2730 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2731 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2732 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2734 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2735 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2736 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2737 vlan_prio = (mf_config &
2738 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2739 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2740 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
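/* Worked example (hypothetical values): vlan_val == 100 with
 * vlan_prio == 5 yields 100 | (5 << VLAN_PRIO_SHIFT) == 0xa064,
 * i.e. the 802.1Q PCP value lands in the top three bits of the tag.
 */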
2743 func_mf_config[func].afex_config) &
2744 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2745 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2748 func_mf_config[func].afex_config) &
2749 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2750 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2752 /* send ramrod to FW, return in case of failure */
2753 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2757 bp->afex_def_vlan_tag = vlan_val;
2758 bp->afex_vlan_mode = vlan_mode;
2760 /* notify link down because the function is disabled in mf_cfg */
2761 bnx2x_link_report(bp);
2763 /* send INVALID VIF ramrod to FW */
2764 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2766 /* Reset the default afex VLAN */
2767 bp->afex_def_vlan_tag = -1;
2772 static void bnx2x_pmf_update(struct bnx2x *bp)
2774 int port = BP_PORT(bp);
2778 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2781 * We need the mb() to ensure the ordering between the writing to
2782 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2786 /* queue a periodic task */
2787 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2789 bnx2x_dcbx_pmf_update(bp);
2791 /* enable nig attention */
2792 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
2793 if (bp->common.int_block == INT_BLOCK_HC) {
2794 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2795 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2796 } else if (!CHIP_IS_E1x(bp)) {
2797 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2798 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2801 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2809 * General service functions
2812 /* send the MCP a request, block until there is a reply */
2813 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2815 int mb_idx = BP_FW_MB_IDX(bp);
2819 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2821 mutex_lock(&bp->fw_mb_mutex);
2823 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2824 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2826 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2827 (command | seq), param);
2830 /* let the FW do its magic ... */
2833 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2835 /* Give the FW up to 5 seconds (500 * 10ms) */
2836 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2838 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2839 cnt*delay, rc, seq);
2841 /* is this a reply to our command? */
2842 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2843 rc &= FW_MSG_CODE_MASK;
2846 BNX2X_ERR("FW failed to respond!\n");
2850 mutex_unlock(&bp->fw_mb_mutex);
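/* Illustrative caller sketch (hypothetical flow, error handling
 * elided): callers compare the masked response against the expected
 * FW_MSG_CODE_* value and treat a return of 0 as "MCP did not respond":
 *
 *	u32 resp = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 *	if (!resp)
 *		return -EBUSY;
 */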
2856 static void storm_memset_func_cfg(struct bnx2x *bp,
2857 struct tstorm_eth_function_common_config *tcfg,
2860 size_t size = sizeof(struct tstorm_eth_function_common_config);
2862 u32 addr = BAR_TSTRORM_INTMEM +
2863 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2865 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2868 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2870 if (CHIP_IS_E1x(bp)) {
2871 struct tstorm_eth_function_common_config tcfg = {0};
2873 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2876 /* Enable the function in the FW */
2877 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2878 storm_memset_func_en(bp, p->func_id, 1);
2881 if (p->func_flgs & FUNC_FLG_SPQ) {
2882 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2883 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2884 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2889 * bnx2x_get_common_flags - Return common flags
2893 * @zero_stats TRUE if statistics zeroing is needed
2895 * Return the flags that are common for the Tx-only and not normal connections.
2897 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2898 struct bnx2x_fastpath *fp,
2901 unsigned long flags = 0;
2903 /* PF driver will always initialize the Queue to an ACTIVE state */
2904 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2906 /* tx only connections collect statistics (on the same index as the
2907 * parent connection). The statistics are zeroed when the parent
2908 * connection is initialized.
2911 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2913 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2919 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2920 struct bnx2x_fastpath *fp,
2923 unsigned long flags = 0;
2925 /* calculate other queue flags */
2927 __set_bit(BNX2X_Q_FLG_OV, &flags);
2929 if (IS_FCOE_FP(fp)) {
2930 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2931 /* For FCoE - force usage of default priority (for afex) */
2932 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2935 if (!fp->disable_tpa) {
2936 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2937 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
2938 if (fp->mode == TPA_MODE_GRO)
2939 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
2943 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
2944 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
2947 /* Always set HW VLAN stripping */
2948 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2950 /* configure silent vlan removal */
2952 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
2955 return flags | bnx2x_get_common_flags(bp, fp, true);
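/* The returned mask is consumed with bit helpers matching the
 * __set_bit() calls above, e.g. a downstream check (sketch):
 *
 *	if (test_bit(BNX2X_Q_FLG_TPA, &flags))
 *		... the queue was configured for TPA ...
 */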
2958 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
2959 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
2962 gen_init->stat_id = bnx2x_stats_id(fp);
2963 gen_init->spcl_id = fp->cl_id;
2965 /* Always use mini-jumbo MTU for FCoE L2 ring */
2967 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2969 gen_init->mtu = bp->dev->mtu;
2971 gen_init->cos = cos;
2974 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2975 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2976 struct bnx2x_rxq_setup_params *rxq_init)
2980 u16 tpa_agg_size = 0;
2982 if (!fp->disable_tpa) {
2983 pause->sge_th_lo = SGE_TH_LO(bp);
2984 pause->sge_th_hi = SGE_TH_HI(bp);
2986 /* validate SGE ring has enough to cross high threshold */
2987 WARN_ON(bp->dropless_fc &&
2988 pause->sge_th_hi + FW_PREFETCH_CNT >
2989 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2991 tpa_agg_size = min_t(u32,
2992 (min_t(u32, 8, MAX_SKB_FRAGS) *
2993 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2994 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2996 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2997 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2998 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
3002 /* pause - not for e1 */
3003 if (!CHIP_IS_E1(bp)) {
3004 pause->bd_th_lo = BD_TH_LO(bp);
3005 pause->bd_th_hi = BD_TH_HI(bp);
3007 pause->rcq_th_lo = RCQ_TH_LO(bp);
3008 pause->rcq_th_hi = RCQ_TH_HI(bp);
3010 * validate that the rings have enough entries to cross the high thresholds */
3013 WARN_ON(bp->dropless_fc &&
3014 pause->bd_th_hi + FW_PREFETCH_CNT >
3016 WARN_ON(bp->dropless_fc &&
3017 pause->rcq_th_hi + FW_PREFETCH_CNT >
3018 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3024 rxq_init->dscr_map = fp->rx_desc_mapping;
3025 rxq_init->sge_map = fp->rx_sge_mapping;
3026 rxq_init->rcq_map = fp->rx_comp_mapping;
3027 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3029 /* This should be the maximum number of data bytes that may be
3030 * placed on the BD (not including padding). */
3032 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3033 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3035 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3036 rxq_init->tpa_agg_sz = tpa_agg_size;
3037 rxq_init->sge_buf_sz = sge_sz;
3038 rxq_init->max_sges_pkt = max_sge;
3039 rxq_init->rss_engine_id = BP_FUNC(bp);
3040 rxq_init->mcast_engine_id = BP_FUNC(bp);
3042 /* Maximum number of simultaneous TPA aggregations for this Queue.
3044 * For PF Clients it should be the maximum available number.
3045 * VF driver(s) may want to define it to a smaller value.
3047 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3049 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3050 rxq_init->fw_sb_id = fp->fw_sb_id;
3053 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3055 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3056 /* configure silent vlan removal
3057 * if multi function mode is afex, then mask default vlan
3059 if (IS_MF_AFEX(bp)) {
3060 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3061 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3065 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3066 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3069 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3070 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3071 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3072 txq_init->fw_sb_id = fp->fw_sb_id;
3075 * set the tss leading client id for TX classification ==
3076 * leading RSS client id */
3078 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3080 if (IS_FCOE_FP(fp)) {
3081 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3082 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3086 static void bnx2x_pf_init(struct bnx2x *bp)
3088 struct bnx2x_func_init_params func_init = {0};
3089 struct event_ring_data eq_data = { {0} };
3092 if (!CHIP_IS_E1x(bp)) {
3093 /* reset IGU PF statistics: MSIX + ATTN */
3095 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3096 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3097 (CHIP_MODE_IS_4_PORT(bp) ?
3098 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3100 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3101 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3102 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3103 (CHIP_MODE_IS_4_PORT(bp) ?
3104 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3107 /* function setup flags */
3108 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3110 /* This flag is relevant for E1x only.
3111 * E2 doesn't have a TPA configuration at the function level. */
3113 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3115 func_init.func_flgs = flags;
3116 func_init.pf_id = BP_FUNC(bp);
3117 func_init.func_id = BP_FUNC(bp);
3118 func_init.spq_map = bp->spq_mapping;
3119 func_init.spq_prod = bp->spq_prod_idx;
3121 bnx2x_func_init(bp, &func_init);
3123 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3126 * Congestion management values depend on the link rate.
3127 * There is no active link, so the initial link rate is set to 10 Gbps.
3128 * When the link comes up, the congestion management values are
3129 * re-calculated according to the actual link rate. */
3131 bp->link_vars.line_speed = SPEED_10000;
3132 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3134 /* Only the PMF sets the HW */
3136 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3138 /* init Event Queue */
3139 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3140 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3141 eq_data.producer = bp->eq_prod;
3142 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3143 eq_data.sb_id = DEF_SB_ID;
3144 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3148 static void bnx2x_e1h_disable(struct bnx2x *bp)
3150 int port = BP_PORT(bp);
3152 bnx2x_tx_disable(bp);
3154 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3157 static void bnx2x_e1h_enable(struct bnx2x *bp)
3159 int port = BP_PORT(bp);
3161 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3163 /* Tx queues should only be re-enabled */
3164 netif_tx_wake_all_queues(bp->dev);
3167 * Do not call netif_carrier_on here: it will be invoked by the link
3168 * state check if the link is up. */
3172 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3174 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3176 struct eth_stats_info *ether_stat =
3177 &bp->slowpath->drv_info_to_mcp.ether_stat;
3179 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3180 ETH_STAT_INFO_VERSION_LEN);
3182 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3183 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3184 ether_stat->mac_local);
3186 ether_stat->mtu_size = bp->dev->mtu;
3188 if (bp->dev->features & NETIF_F_RXCSUM)
3189 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3190 if (bp->dev->features & NETIF_F_TSO)
3191 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3192 ether_stat->feature_flags |= bp->common.boot_mode;
3194 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3196 ether_stat->txq_size = bp->tx_ring_size;
3197 ether_stat->rxq_size = bp->rx_ring_size;
3200 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3202 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3203 struct fcoe_stats_info *fcoe_stat =
3204 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3206 if (!CNIC_LOADED(bp))
3209 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3210 bp->fip_mac, ETH_ALEN);
3212 fcoe_stat->qos_priority =
3213 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3215 /* insert FCoE stats from ramrod response */
3217 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3218 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3219 tstorm_queue_statistics;
3221 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3222 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3223 xstorm_queue_statistics;
3225 struct fcoe_statistics_params *fw_fcoe_stat =
3226 &bp->fw_stats_data->fcoe;
3228 ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
3229 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3231 ADD_64(fcoe_stat->rx_bytes_hi,
3232 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3233 fcoe_stat->rx_bytes_lo,
3234 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3236 ADD_64(fcoe_stat->rx_bytes_hi,
3237 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3238 fcoe_stat->rx_bytes_lo,
3239 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3241 ADD_64(fcoe_stat->rx_bytes_hi,
3242 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3243 fcoe_stat->rx_bytes_lo,
3244 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3246 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3247 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3249 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3250 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3252 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3253 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3255 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3256 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3258 ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
3259 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3261 ADD_64(fcoe_stat->tx_bytes_hi,
3262 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3263 fcoe_stat->tx_bytes_lo,
3264 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3266 ADD_64(fcoe_stat->tx_bytes_hi,
3267 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3268 fcoe_stat->tx_bytes_lo,
3269 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3271 ADD_64(fcoe_stat->tx_bytes_hi,
3272 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3273 fcoe_stat->tx_bytes_lo,
3274 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3276 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3277 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3279 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3280 fcoe_q_xstorm_stats->ucast_pkts_sent);
3282 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3283 fcoe_q_xstorm_stats->bcast_pkts_sent);
3285 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3286 fcoe_q_xstorm_stats->mcast_pkts_sent);
3289 /* ask L5 driver to add data to the struct */
3290 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3293 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3295 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3296 struct iscsi_stats_info *iscsi_stat =
3297 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3299 if (!CNIC_LOADED(bp))
3302 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3303 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3305 iscsi_stat->qos_priority =
3306 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3308 /* ask L5 driver to add data to the struct */
3309 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3312 /* called due to MCP event (on pmf):
3313 * reread new bandwidth configuration
3315 * notify the other functions about the change */
3317 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3319 if (bp->link_vars.link_up) {
3320 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3321 bnx2x_link_sync_notify(bp);
3323 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3326 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3328 bnx2x_config_mf_bw(bp);
3329 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3332 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3334 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3335 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3338 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3340 enum drv_info_opcode op_code;
3341 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3343 /* if drv_info version supported by MFW doesn't match - send NACK */
3344 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3345 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3349 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3350 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3352 memset(&bp->slowpath->drv_info_to_mcp, 0,
3353 sizeof(union drv_info_to_mcp));
3356 case ETH_STATS_OPCODE:
3357 bnx2x_drv_info_ether_stat(bp);
3359 case FCOE_STATS_OPCODE:
3360 bnx2x_drv_info_fcoe_stat(bp);
3362 case ISCSI_STATS_OPCODE:
3363 bnx2x_drv_info_iscsi_stat(bp);
3366 /* if op code isn't supported - send NACK */
3367 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3371 /* if we got a drv_info attn from MFW then these fields are defined in shmem2 */
3374 SHMEM2_WR(bp, drv_info_host_addr_lo,
3375 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3376 SHMEM2_WR(bp, drv_info_host_addr_hi,
3377 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3379 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3382 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3384 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3386 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3389 * This is the only place besides the function initialization
3390 * where the bp->flags can change, so it is done without any locks. */
3393 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3394 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3395 bp->flags |= MF_FUNC_DIS;
3397 bnx2x_e1h_disable(bp);
3399 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3400 bp->flags &= ~MF_FUNC_DIS;
3402 bnx2x_e1h_enable(bp);
3404 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3406 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3407 bnx2x_config_mf_bw(bp);
3408 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3411 /* Report results to MCP: any bits left set in dcc_event denote an unhandled request */
3413 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3415 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3418 /* must be called under the spq lock */
3419 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3421 struct eth_spe *next_spe = bp->spq_prod_bd;
3423 if (bp->spq_prod_bd == bp->spq_last_bd) {
3424 bp->spq_prod_bd = bp->spq;
3425 bp->spq_prod_idx = 0;
3426 DP(BNX2X_MSG_SP, "end of spq\n");
3434 /* must be called under the spq lock */
3435 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3437 int func = BP_FUNC(bp);
3440 * Make sure that BD data is updated before writing the producer:
3441 * BD data is written to the memory, the producer is read from the
3442 * memory, thus we need a full memory barrier to ensure the ordering.
3446 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3452 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3454 * @cmd: command to check
3455 * @cmd_type: command type
3457 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3459 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3460 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3461 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3462 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3463 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3464 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3465 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3474 * bnx2x_sp_post - place a single command on an SP ring
3476 * @bp: driver handle
3477 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
3478 * @cid: SW CID the command is related to
3479 * @data_hi: command private data address (high 32 bits)
3480 * @data_lo: command private data address (low 32 bits)
3481 * @cmd_type: command type (e.g. NONE, ETH)
3483 * SP data is handled as if it's always an address pair, thus data fields are
3484 * not swapped to little endian in upper functions. Instead this function swaps
3485 * data as if it's two u32 fields.
3487 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3488 u32 data_hi, u32 data_lo, int cmd_type)
3490 struct eth_spe *spe;
3492 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3494 #ifdef BNX2X_STOP_ON_ERROR
3495 if (unlikely(bp->panic)) {
3496 BNX2X_ERR("Can't post SP when there is panic\n");
3501 spin_lock_bh(&bp->spq_lock);
3504 if (!atomic_read(&bp->eq_spq_left)) {
3505 BNX2X_ERR("BUG! EQ ring full!\n");
3506 spin_unlock_bh(&bp->spq_lock);
3510 } else if (!atomic_read(&bp->cq_spq_left)) {
3511 BNX2X_ERR("BUG! SPQ ring full!\n");
3512 spin_unlock_bh(&bp->spq_lock);
3517 spe = bnx2x_sp_get_next(bp);
3519 /* CID needs the port number to be encoded into it */
3520 spe->hdr.conn_and_cmd_data =
3521 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3524 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3526 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3527 SPE_HDR_FUNCTION_ID);
3529 spe->hdr.type = cpu_to_le16(type);
3531 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3532 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3535 * It's ok if the actual decrement is issued towards the memory
3536 * somewhere between the spin_lock and spin_unlock. Thus no
3537 * more explicit memory barrier is needed. */
3540 atomic_dec(&bp->eq_spq_left);
3542 atomic_dec(&bp->cq_spq_left);
3546 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3547 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3548 (u32)(U64_LO(bp->spq_mapping) +
3549 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3550 HW_CID(bp, cid), data_hi, data_lo, type,
3551 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3553 bnx2x_sp_prod_update(bp);
3554 spin_unlock_bh(&bp->spq_lock);
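/* Illustrative usage sketch (the cid and mapping are hypothetical): a
 * caller posts a ramrod whose parameters were DMA-mapped at "mapping"
 * by splitting the address into the two u32 halves expected here:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
 *			   cid, U64_HI(mapping), U64_LO(mapping),
 *			   ETH_CONNECTION_TYPE);
 */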
3558 /* acquire split MCP access lock register */
3559 static int bnx2x_acquire_alr(struct bnx2x *bp)
3565 for (j = 0; j < 1000; j++) {
3567 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3568 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3569 if (val & (1L << 31))
3574 if (!(val & (1L << 31))) {
3575 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3582 /* release split MCP access lock register */
3583 static void bnx2x_release_alr(struct bnx2x *bp)
3585 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
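/* Typical pairing sketch: the ALR must be held across any access to the
 * split MCP registers and released on every exit path:
 *
 *	if (bnx2x_acquire_alr(bp))
 *		return;
 *	... access the shared MCP resources ...
 *	bnx2x_release_alr(bp);
 */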
3588 #define BNX2X_DEF_SB_ATT_IDX 0x0001
3589 #define BNX2X_DEF_SB_IDX 0x0002
3591 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3593 struct host_sp_status_block *def_sb = bp->def_status_blk;
3596 barrier(); /* status block is written to by the chip */
3597 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3598 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3599 rc |= BNX2X_DEF_SB_ATT_IDX;
3602 if (bp->def_idx != def_sb->sp_sb.running_index) {
3603 bp->def_idx = def_sb->sp_sb.running_index;
3604 rc |= BNX2X_DEF_SB_IDX;
3607 /* Do not reorder: the index reads should complete before handling */
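/* Sketch of how the returned bits are typically consumed by the slow
 * path task:
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *
 *	if (status & BNX2X_DEF_SB_ATT_IDX)
 *		... handle attentions ...
 *	if (status & BNX2X_DEF_SB_IDX)
 *		... handle slow path events ...
 */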
3613 * slow path service functions
3616 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3618 int port = BP_PORT(bp);
3619 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3620 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3621 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3622 NIG_REG_MASK_INTERRUPT_PORT0;
3627 if (bp->attn_state & asserted)
3628 BNX2X_ERR("IGU ERROR\n");
3630 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3631 aeu_mask = REG_RD(bp, aeu_addr);
3633 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3634 aeu_mask, asserted);
3635 aeu_mask &= ~(asserted & 0x3ff);
3636 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3638 REG_WR(bp, aeu_addr, aeu_mask);
3639 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3641 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3642 bp->attn_state |= asserted;
3643 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3645 if (asserted & ATTN_HARD_WIRED_MASK) {
3646 if (asserted & ATTN_NIG_FOR_FUNC) {
3648 bnx2x_acquire_phy_lock(bp);
3650 /* save nig interrupt mask */
3651 nig_mask = REG_RD(bp, nig_int_mask_addr);
3653 /* If nig_mask is not set, no need to call the update function */
3657 REG_WR(bp, nig_int_mask_addr, 0);
3659 bnx2x_link_attn(bp);
3662 /* handle unicore attn? */
3664 if (asserted & ATTN_SW_TIMER_4_FUNC)
3665 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3667 if (asserted & GPIO_2_FUNC)
3668 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3670 if (asserted & GPIO_3_FUNC)
3671 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3673 if (asserted & GPIO_4_FUNC)
3674 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3677 if (asserted & ATTN_GENERAL_ATTN_1) {
3678 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3679 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3681 if (asserted & ATTN_GENERAL_ATTN_2) {
3682 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3683 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3685 if (asserted & ATTN_GENERAL_ATTN_3) {
3686 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3687 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3690 if (asserted & ATTN_GENERAL_ATTN_4) {
3691 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3692 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3694 if (asserted & ATTN_GENERAL_ATTN_5) {
3695 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3696 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3698 if (asserted & ATTN_GENERAL_ATTN_6) {
3699 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3700 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3704 } /* if hardwired */
3706 if (bp->common.int_block == INT_BLOCK_HC)
3707 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3708 COMMAND_REG_ATTN_BITS_SET);
3710 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3712 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3713 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3714 REG_WR(bp, reg_addr, asserted);
3716 /* now set back the mask */
3717 if (asserted & ATTN_NIG_FOR_FUNC) {
3718 /* Verify that IGU ack through BAR was written before restoring
3719 * NIG mask. This loop should exit after 2-3 iterations max.
3721 if (bp->common.int_block != INT_BLOCK_HC) {
3722 u32 cnt = 0, igu_acked;
3724 igu_acked = REG_RD(bp,
3725 IGU_REG_ATTENTION_ACK_BITS);
3726 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3727 (++cnt < MAX_IGU_ATTN_ACK_TO));
3730 "Failed to verify IGU ack on time\n");
3733 REG_WR(bp, nig_int_mask_addr, nig_mask);
3734 bnx2x_release_phy_lock(bp);
3738 static void bnx2x_fan_failure(struct bnx2x *bp)
3740 int port = BP_PORT(bp);
3742 /* mark the failure */
3745 dev_info.port_hw_config[port].external_phy_config);
3747 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3748 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3749 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3752 /* log the failure */
3753 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3754 "Please contact OEM Support for assistance\n");
3757 * Schedule device reset (unload).
3758 * Some boards consume enough power when the driver is up to overheat
3759 * if the fan fails. */
3761 smp_mb__before_clear_bit();
3762 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3763 smp_mb__after_clear_bit();
3764 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3768 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3770 int port = BP_PORT(bp);
3774 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3775 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3777 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3779 val = REG_RD(bp, reg_offset);
3780 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3781 REG_WR(bp, reg_offset, val);
3783 BNX2X_ERR("SPIO5 hw attention\n");
3785 /* Fan failure attention */
3786 bnx2x_hw_reset_phy(&bp->link_params);
3787 bnx2x_fan_failure(bp);
3790 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3791 bnx2x_acquire_phy_lock(bp);
3792 bnx2x_handle_module_detect_int(&bp->link_params);
3793 bnx2x_release_phy_lock(bp);
3796 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3798 val = REG_RD(bp, reg_offset);
3799 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3800 REG_WR(bp, reg_offset, val);
3802 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3803 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3808 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3812 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3814 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3815 BNX2X_ERR("DB hw attention 0x%x\n", val);
3816 /* DORQ discard attention */
3818 BNX2X_ERR("FATAL error from DORQ\n");
3821 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3823 int port = BP_PORT(bp);
3826 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3827 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3829 val = REG_RD(bp, reg_offset);
3830 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3831 REG_WR(bp, reg_offset, val);
3833 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3834 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3839 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3843 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3845 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3846 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3847 /* CFC error attention */
3849 BNX2X_ERR("FATAL error from CFC\n");
3852 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3853 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3854 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3855 /* RQ_USDMDP_FIFO_OVERFLOW */
3857 BNX2X_ERR("FATAL error from PXP\n");
3859 if (!CHIP_IS_E1x(bp)) {
3860 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3861 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3865 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3867 int port = BP_PORT(bp);
3870 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3871 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3873 val = REG_RD(bp, reg_offset);
3874 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3875 REG_WR(bp, reg_offset, val);
3877 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3878 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3883 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3887 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3889 if (attn & BNX2X_PMF_LINK_ASSERT) {
3890 int func = BP_FUNC(bp);
3892 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3893 bnx2x_read_mf_cfg(bp);
3894 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3895 func_mf_config[BP_ABS_FUNC(bp)].config);
3897 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3898 if (val & DRV_STATUS_DCC_EVENT_MASK)
3900 (val & DRV_STATUS_DCC_EVENT_MASK));
3902 if (val & DRV_STATUS_SET_MF_BW)
3903 bnx2x_set_mf_bw(bp);
3905 if (val & DRV_STATUS_DRV_INFO_REQ)
3906 bnx2x_handle_drv_info_req(bp);
3907 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3908 bnx2x_pmf_update(bp);
3911 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3912 bp->dcbx_enabled > 0)
3913 /* start dcbx state machine */
3914 bnx2x_dcbx_set_params(bp,
3915 BNX2X_DCBX_STATE_NEG_RECEIVED);
3916 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3917 bnx2x_handle_afex_cmd(bp,
3918 val & DRV_STATUS_AFEX_EVENT_MASK);
3919 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3920 bnx2x_handle_eee_event(bp);
3921 if (bp->link_vars.periodic_flags &
3922 PERIODIC_FLAGS_LINK_EVENT) {
3923 /* sync with link */
3924 bnx2x_acquire_phy_lock(bp);
3925 bp->link_vars.periodic_flags &=
3926 ~PERIODIC_FLAGS_LINK_EVENT;
3927 bnx2x_release_phy_lock(bp);
3929 bnx2x_link_sync_notify(bp);
3930 bnx2x_link_report(bp);
3932 /* Always call it here: bnx2x_link_report() will
3933 * prevent duplicate link indications. */
3935 bnx2x__link_status_update(bp);
3936 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3938 BNX2X_ERR("MC assert!\n");
3939 bnx2x_mc_assert(bp);
3940 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3941 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3942 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3946 } else if (attn & BNX2X_MCP_ASSERT) {
3948 BNX2X_ERR("MCP assert!\n");
3949 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3953 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3956 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3957 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3958 if (attn & BNX2X_GRC_TIMEOUT) {
3959 val = CHIP_IS_E1(bp) ? 0 :
3960 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3961 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3963 if (attn & BNX2X_GRC_RSV) {
3964 val = CHIP_IS_E1(bp) ? 0 :
3965 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3966 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3968 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3974 * 0-7 - Engine0 load counter.
3975 * 8-15 - Engine1 load counter.
3976 * 16 - Engine0 RESET_IN_PROGRESS bit.
3977 * 17 - Engine1 RESET_IN_PROGRESS bit.
3978 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function on the engine.
3980 * 19 - Engine1 ONE_IS_LOADED.
3981 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines'
3982 * leaders to complete (check both RESET_IN_PROGRESS bits and not
3983 * just the one belonging to its engine). */
3986 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3988 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3989 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3990 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3991 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3992 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3993 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3994 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
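/* Worked example of the layout above: a register value of 0x00050003
 * decodes, using the masks above, as PFs 0 and 1 loaded on engine 0
 * (load mask 0x03), BNX2X_PATH0_RST_IN_PROG_BIT set and
 * BNX2X_GLOBAL_RESET_BIT set.
 */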
3997 * Set the GLOBAL_RESET bit.
3999 * Should be run under rtnl lock
4001 void bnx2x_set_reset_global(struct bnx2x *bp)
4004 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4005 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4006 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4007 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4011 * Clear the GLOBAL_RESET bit.
4013 * Should be run under rtnl lock
4015 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4018 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4019 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4020 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4021 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4025 * Checks the GLOBAL_RESET bit.
4027 * should be run under rtnl lock
4029 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4031 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4033 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4034 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4038 * Clear RESET_IN_PROGRESS bit for the current engine.
4040 * Should be run under rtnl lock
4042 static void bnx2x_set_reset_done(struct bnx2x *bp)
4045 u32 bit = BP_PATH(bp) ?
4046 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4047 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4048 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4052 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4054 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4058 * Set RESET_IN_PROGRESS for the current engine.
4060 * should be run under rtnl lock
4062 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4065 u32 bit = BP_PATH(bp) ?
4066 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4067 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4068 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4072 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4073 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4077 * Checks the RESET_IN_PROGRESS bit for the given engine.
4078 * should be run under rtnl lock
4080 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4082 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4084 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4086 /* return false if bit is set */
4087 return (val & bit) ? false : true;
4091 * set pf load for the current pf.
4093 * should be run under rtnl lock
4095 void bnx2x_set_pf_load(struct bnx2x *bp)
4098 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4099 BNX2X_PATH0_LOAD_CNT_MASK;
4100 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4101 BNX2X_PATH0_LOAD_CNT_SHIFT;
4103 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4104 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4106 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4108 /* get the current counter value */
4109 val1 = (val & mask) >> shift;
4111 /* set bit of that PF */
4112 val1 |= (1 << bp->pf_num);
4114 /* clear the old value */
4117 /* set the new one */
4118 val |= ((val1 << shift) & mask);
4120 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4121 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4125 * bnx2x_clear_pf_load - clear pf load mark
4127 * @bp: driver handle
4129 * Should be run under rtnl lock.
4130 * Decrements the load counter for the current engine. Returns
4131 * whether other functions are still loaded
4133 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4136 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4137 BNX2X_PATH0_LOAD_CNT_MASK;
4138 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4139 BNX2X_PATH0_LOAD_CNT_SHIFT;
4141 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4142 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4143 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4145 /* get the current counter value */
4146 val1 = (val & mask) >> shift;
4148 /* clear bit of that PF */
4149 val1 &= ~(1 << bp->pf_num);
4151 /* clear the old value */
4154 /* set the new one */
4155 val |= ((val1 << shift) & mask);
4157 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4158 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4163 * Read the load status for the current engine.
4165 * should be run under rtnl lock
4167 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4169 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4170 BNX2X_PATH0_LOAD_CNT_MASK);
4171 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4172 BNX2X_PATH0_LOAD_CNT_SHIFT);
4173 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4175 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4177 val = (val & mask) >> shift;
4179 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4185 static void _print_next_block(int idx, const char *blk)
4187 pr_cont("%s%s", idx ? ", " : "", blk);
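/* Together with the header printed by bnx2x_parity_attn() below, two
 * flagged blocks produce console output of the form:
 * "Parity errors detected in blocks: BRB, PARSER"
 */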
4190 static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
4195 for (i = 0; sig; i++) {
4196 cur_bit = ((u32)0x1 << i);
4197 if (sig & cur_bit) {
4199 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4201 _print_next_block(par_num++, "BRB");
4203 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4205 _print_next_block(par_num++, "PARSER");
4207 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4209 _print_next_block(par_num++, "TSDM");
4211 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4213 _print_next_block(par_num++,
4216 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4218 _print_next_block(par_num++, "TCM");
4220 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4222 _print_next_block(par_num++, "TSEMI");
4224 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4226 _print_next_block(par_num++, "XPB");
4238 static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4239 bool *global, bool print)
4243 for (i = 0; sig; i++) {
4244 cur_bit = ((u32)0x1 << i);
4245 if (sig & cur_bit) {
4247 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4249 _print_next_block(par_num++, "PBF");
4251 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4253 _print_next_block(par_num++, "QM");
4255 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4257 _print_next_block(par_num++, "TM");
4259 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4261 _print_next_block(par_num++, "XSDM");
4263 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4265 _print_next_block(par_num++, "XCM");
4267 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4269 _print_next_block(par_num++, "XSEMI");
4271 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4273 _print_next_block(par_num++,
4276 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4278 _print_next_block(par_num++, "NIG");
4280 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4282 _print_next_block(par_num++,
4286 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4288 _print_next_block(par_num++, "DEBUG");
4290 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4292 _print_next_block(par_num++, "USDM");
4294 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4296 _print_next_block(par_num++, "UCM");
4298 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4300 _print_next_block(par_num++, "USEMI");
4302 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4304 _print_next_block(par_num++, "UPB");
4306 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4308 _print_next_block(par_num++, "CSDM");
4310 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4312 _print_next_block(par_num++, "CCM");
4324 static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4329 for (i = 0; sig; i++) {
4330 cur_bit = ((u32)0x1 << i);
4331 if (sig & cur_bit) {
4333 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4335 _print_next_block(par_num++, "CSEMI");
4337 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4339 _print_next_block(par_num++, "PXP");
4341 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4343 _print_next_block(par_num++,
4344 "PXPPCICLOCKCLIENT");
4346 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4348 _print_next_block(par_num++, "CFC");
4350 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4352 _print_next_block(par_num++, "CDU");
4354 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4356 _print_next_block(par_num++, "DMAE");
4358 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4360 _print_next_block(par_num++, "IGU");
4362 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4364 _print_next_block(par_num++, "MISC");
4376 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4377 bool *global, bool print)
4381 for (i = 0; sig; i++) {
4382 cur_bit = ((u32)0x1 << i);
4383 if (sig & cur_bit) {
4385 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4387 _print_next_block(par_num++, "MCP ROM");
4390 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4392 _print_next_block(par_num++,
4396 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4398 _print_next_block(par_num++,
4402 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4404 _print_next_block(par_num++,
4418 static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4423 for (i = 0; sig; i++) {
4424 cur_bit = ((u32)0x1 << i);
4425 if (sig & cur_bit) {
4427 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4429 _print_next_block(par_num++, "PGLUE_B");
4431 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4433 _print_next_block(par_num++, "ATC");
4445 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4448 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4449 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4450 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4451 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4452 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4454 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4455 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4456 sig[0] & HW_PRTY_ASSERT_SET_0,
4457 sig[1] & HW_PRTY_ASSERT_SET_1,
4458 sig[2] & HW_PRTY_ASSERT_SET_2,
4459 sig[3] & HW_PRTY_ASSERT_SET_3,
4460 sig[4] & HW_PRTY_ASSERT_SET_4);
4463 "Parity errors detected in blocks: ");
4464 par_num = bnx2x_check_blocks_with_parity0(
4465 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4466 par_num = bnx2x_check_blocks_with_parity1(
4467 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4468 par_num = bnx2x_check_blocks_with_parity2(
4469 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4470 par_num = bnx2x_check_blocks_with_parity3(
4471 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4472 par_num = bnx2x_check_blocks_with_parity4(
4473 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
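/* Note on the par_num threading above (illustration, not driver code):
 * each bnx2x_check_blocks_with_parityN() call returns the updated counter,
 * and _print_next_block() prints a leading ", " only when idx != 0, so the
 * walkers together emit one comma-separated list, e.g.:
 *
 *	Parity errors detected in blocks: BRB, PARSER, TSDM
 */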
4484 * bnx2x_chk_parity_attn - checks for parity attentions.
4486 * @bp: driver handle
4487 * @global: true if there was a global attention
4488 * @print: show parity attention in syslog
4490 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4492 struct attn_route attn = { {0} };
4493 int port = BP_PORT(bp);
4495 attn.sig[0] = REG_RD(bp,
4496 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4498 attn.sig[1] = REG_RD(bp,
4499 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4501 attn.sig[2] = REG_RD(bp,
4502 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4504 attn.sig[3] = REG_RD(bp,
4505 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4508 if (!CHIP_IS_E1x(bp))
4509 attn.sig[4] = REG_RD(bp,
4510 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4513 return bnx2x_parity_attn(bp, global, print, attn.sig);
4517 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4520 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4522 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4523 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4524 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4525 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4526 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4527 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4528 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4529 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4530 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4531 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4533 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4534 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4536 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4537 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4538 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4539 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4540 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4541 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4542 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4543 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4545 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4546 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4547 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4548 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4549 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4550 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4551 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4552 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4553 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4554 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4555 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4556 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4557 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4558 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4559 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4562 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4563 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4564 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4565 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4566 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4571 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4573 struct attn_route attn, *group_mask;
4574 int port = BP_PORT(bp);
4579 bool global = false;
4581 /* need to take HW lock because MCP or other port might also
4582 try to handle this event */
4583 bnx2x_acquire_alr(bp);
4585 if (bnx2x_chk_parity_attn(bp, &global, true)) {
4586 #ifndef BNX2X_STOP_ON_ERROR
4587 bp->recovery_state = BNX2X_RECOVERY_INIT;
4588 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4589 /* Disable HW interrupts */
4590 bnx2x_int_disable(bp);
4591 /* In case of parity errors don't handle attentions so that
4592 * the other function would "see" parity errors.
4597 bnx2x_release_alr(bp);
4601 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4602 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4603 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4604 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4605 if (!CHIP_IS_E1x(bp))
4607 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4611 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4612 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4614 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4615 if (deasserted & (1 << index)) {
4616 group_mask = &bp->attn_group[index];
4618 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
4620 group_mask->sig[0], group_mask->sig[1],
4621 group_mask->sig[2], group_mask->sig[3],
4622 group_mask->sig[4]);
4624 bnx2x_attn_int_deasserted4(bp,
4625 attn.sig[4] & group_mask->sig[4]);
4626 bnx2x_attn_int_deasserted3(bp,
4627 attn.sig[3] & group_mask->sig[3]);
4628 bnx2x_attn_int_deasserted1(bp,
4629 attn.sig[1] & group_mask->sig[1]);
4630 bnx2x_attn_int_deasserted2(bp,
4631 attn.sig[2] & group_mask->sig[2]);
4632 bnx2x_attn_int_deasserted0(bp,
4633 attn.sig[0] & group_mask->sig[0]);
4637 bnx2x_release_alr(bp);
4639 if (bp->common.int_block == INT_BLOCK_HC)
4640 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4641 COMMAND_REG_ATTN_BITS_CLR);
4643 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4646 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4647 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4648 REG_WR(bp, reg_addr, val);
4650 if (~bp->attn_state & deasserted)
4651 BNX2X_ERR("IGU ERROR\n");
4653 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4654 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4656 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4657 aeu_mask = REG_RD(bp, reg_addr);
4659 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
4660 aeu_mask, deasserted);
4661 aeu_mask |= (deasserted & 0x3ff);
4662 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4664 REG_WR(bp, reg_addr, aeu_mask);
4665 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4667 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4668 bp->attn_state &= ~deasserted;
4669 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4672 static void bnx2x_attn_int(struct bnx2x *bp)
4674 /* read local copy of bits */
4675 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4677 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4679 u32 attn_state = bp->attn_state;
4681 /* look for changed bits */
4682 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4683 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4686 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4687 attn_bits, attn_ack, asserted, deasserted);
4689 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4690 BNX2X_ERR("BAD attention state\n");
4692 /* handle bits that were raised */
4694 bnx2x_attn_int_asserted(bp, asserted);
4697 bnx2x_attn_int_deasserted(bp, deasserted);
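/* Sketch of the edge detection above, with hypothetical bit values:
 *
 *	attn_bits  = 0b0110   (currently raised in the status block)
 *	attn_ack   = 0b0011   (last value acknowledged)
 *	attn_state = 0b0011   (what the driver believes is asserted)
 *
 *	asserted   = attn_bits & ~attn_ack & ~attn_state = 0b0100  (new)
 *	deasserted = ~attn_bits & attn_ack & attn_state  = 0b0001  (gone)
 *
 * A bit where attn_bits equals attn_ack but differs from attn_state is
 * inconsistent; that is what the "BAD attention state" check catches.
 */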
4700 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4701 u16 index, u8 op, u8 update)
4703 u32 igu_addr = bp->igu_base_addr;
4704 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4705 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4709 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4711 /* No memory barriers */
4712 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4713 mmiowb(); /* keep prod updates ordered */
4716 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4717 union event_ring_elem *elem)
4719 u8 err = elem->message.error;
4721 if (!bp->cnic_eth_dev.starting_cid ||
4722 (cid < bp->cnic_eth_dev.starting_cid &&
4723 cid != bp->cnic_eth_dev.iscsi_l2_cid))
4726 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4728 if (unlikely(err)) {
4730 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4732 bnx2x_panic_dump(bp);
4734 bnx2x_cnic_cfc_comp(bp, cid, err);
4738 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4740 struct bnx2x_mcast_ramrod_params rparam;
4743 memset(&rparam, 0, sizeof(rparam));
4745 rparam.mcast_obj = &bp->mcast_obj;
4747 netif_addr_lock_bh(bp->dev);
4749 /* Clear pending state for the last command */
4750 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4752 /* If there are pending mcast commands - send them */
4753 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4754 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4756 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4760 netif_addr_unlock_bh(bp->dev);
4763 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4764 union event_ring_elem *elem)
4766 unsigned long ramrod_flags = 0;
4768 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4769 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
4771 /* Always push next commands out, don't wait here */
4772 __set_bit(RAMROD_CONT, &ramrod_flags);
4774 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4775 case BNX2X_FILTER_MAC_PENDING:
4776 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4777 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4778 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4780 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4783 case BNX2X_FILTER_MCAST_PENDING:
4784 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
4785 /* This is only relevant for 57710 where multicast MACs are
4786 * configured as unicast MACs using the same ramrod.
4788 bnx2x_handle_mcast_eqe(bp);
4791 BNX2X_ERR("Unsupported classification command: %d\n",
4792 elem->message.data.eth_event.echo);
4796 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4799 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4801 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4805 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4807 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4809 netif_addr_lock_bh(bp->dev);
4811 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
4813 /* Send rx_mode command again if was requested */
4814 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4815 bnx2x_set_storm_rx_mode(bp);
4816 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4818 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4819 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4821 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4823 netif_addr_unlock_bh(bp->dev);
4826 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
4827 union event_ring_elem *elem)
4829 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4831 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4832 elem->message.data.vif_list_event.func_bit_map);
4833 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4834 elem->message.data.vif_list_event.func_bit_map);
4835 } else if (elem->message.data.vif_list_event.echo ==
4836 VIF_LIST_RULE_SET) {
4837 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4838 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4842 /* called with rtnl_lock */
4843 static void bnx2x_after_function_update(struct bnx2x *bp)
4846 struct bnx2x_fastpath *fp;
4847 struct bnx2x_queue_state_params queue_params = {NULL};
4848 struct bnx2x_queue_update_params *q_update_params =
4849 &queue_params.params.update;
4851 /* Send Q update command with afex vlan removal values for all Qs */
4852 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
4854 /* set silent vlan removal values according to vlan mode */
4855 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4856 &q_update_params->update_flags);
4857 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4858 &q_update_params->update_flags);
4859 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4861 /* in access mode, mask and value are 0 to strip all vlans */
4862 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4863 q_update_params->silent_removal_value = 0;
4864 q_update_params->silent_removal_mask = 0;
4866 q_update_params->silent_removal_value =
4867 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4868 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4871 for_each_eth_queue(bp, q) {
4872 /* Set the appropriate Queue object */
4874 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4876 /* send the ramrod */
4877 rc = bnx2x_queue_state_change(bp, &queue_params);
4879 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4884 fp = &bp->fp[FCOE_IDX(bp)];
4885 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4887 /* clear pending completion bit */
4888 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4890 /* mark latest Q bit */
4891 smp_mb__before_clear_bit();
4892 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4893 smp_mb__after_clear_bit();
4895 /* send Q update ramrod for FCoE Q */
4896 rc = bnx2x_queue_state_change(bp, &queue_params);
4898 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4901 /* If no FCoE ring - ACK MCP now */
4902 bnx2x_link_report(bp);
4903 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4907 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4908 struct bnx2x *bp, u32 cid)
4910 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4912 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
4913 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4915 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4918 static void bnx2x_eq_int(struct bnx2x *bp)
4920 u16 hw_cons, sw_cons, sw_prod;
4921 union event_ring_elem *elem;
4925 int rc, spqe_cnt = 0;
4926 struct bnx2x_queue_sp_obj *q_obj;
4927 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
4928 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
4930 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
4932 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
4933 * When we get the next-page element we need to adjust so the loop
4934 * condition below will be met. The next element is the size of a
4935 * regular element and hence incrementing by 1
4937 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
4940 /* This function may never run in parallel with itself for a
4941 * specific bp, thus there is no need for "paired" read memory
4944 sw_cons = bp->eq_cons;
4945 sw_prod = bp->eq_prod;
4947 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
4948 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
4950 for (; sw_cons != hw_cons;
4951 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
4954 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
4956 rc = bnx2x_iov_eq_sp_event(bp, elem);
4958 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
4962 cid = SW_CID(elem->message.data.cfc_del_event.cid);
4963 opcode = elem->message.opcode;
4966 /* handle eq element */
4968 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
4969 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
4970 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
4973 case EVENT_RING_OPCODE_STAT_QUERY:
4974 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
4975 "got statistics comp event %d\n",
4977 /* nothing to do with stats comp */
4980 case EVENT_RING_OPCODE_CFC_DEL:
4981 /* handle according to cid range */
4983 * we may want to verify here that the bp state is
4987 "got delete ramrod for MULTI[%d]\n", cid);
4989 if (CNIC_LOADED(bp) &&
4990 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4993 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4995 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5002 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5003 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5004 if (f_obj->complete_cmd(bp, f_obj,
5005 BNX2X_F_CMD_TX_STOP))
5007 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5010 case EVENT_RING_OPCODE_START_TRAFFIC:
5011 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5012 if (f_obj->complete_cmd(bp, f_obj,
5013 BNX2X_F_CMD_TX_START))
5015 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5018 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5019 echo = elem->message.data.function_update_event.echo;
5020 if (echo == SWITCH_UPDATE) {
5021 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5022 "got FUNC_SWITCH_UPDATE ramrod\n");
5023 if (f_obj->complete_cmd(
5024 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5028 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5029 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5030 f_obj->complete_cmd(bp, f_obj,
5031 BNX2X_F_CMD_AFEX_UPDATE);
5033 /* We will perform the Queues update from
5034 * sp_rtnl task as all Queue SP operations
5035 * should run under rtnl_lock.
5037 smp_mb__before_clear_bit();
5038 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5039 &bp->sp_rtnl_state);
5040 smp_mb__after_clear_bit();
5042 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5047 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5048 f_obj->complete_cmd(bp, f_obj,
5049 BNX2X_F_CMD_AFEX_VIFLISTS);
5050 bnx2x_after_afex_vif_lists(bp, elem);
5052 case EVENT_RING_OPCODE_FUNCTION_START:
5053 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5054 "got FUNC_START ramrod\n");
5055 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5060 case EVENT_RING_OPCODE_FUNCTION_STOP:
5061 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5062 "got FUNC_STOP ramrod\n");
5063 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5069 switch (opcode | bp->state) {
5070 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5072 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5073 BNX2X_STATE_OPENING_WAIT4_PORT):
5074 cid = elem->message.data.eth_event.echo &
5076 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5078 rss_raw->clear_pending(rss_raw);
5081 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5082 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5083 case (EVENT_RING_OPCODE_SET_MAC |
5084 BNX2X_STATE_CLOSING_WAIT4_HALT):
5085 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5087 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5089 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5090 BNX2X_STATE_CLOSING_WAIT4_HALT):
5091 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
5092 bnx2x_handle_classification_eqe(bp, elem);
5095 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5097 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5099 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5100 BNX2X_STATE_CLOSING_WAIT4_HALT):
5101 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5102 bnx2x_handle_mcast_eqe(bp);
5105 case (EVENT_RING_OPCODE_FILTERS_RULES |
5107 case (EVENT_RING_OPCODE_FILTERS_RULES |
5109 case (EVENT_RING_OPCODE_FILTERS_RULES |
5110 BNX2X_STATE_CLOSING_WAIT4_HALT):
5111 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5112 bnx2x_handle_rx_mode_eqe(bp);
5115 /* unknown event; log the error and continue */
5116 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5117 elem->message.opcode, bp->state);
5123 smp_mb__before_atomic_inc();
5124 atomic_add(spqe_cnt, &bp->eq_spq_left);
5126 bp->eq_cons = sw_cons;
5127 bp->eq_prod = sw_prod;
5128 /* Make sure the memory writes above were issued before the producer update */
5131 /* update producer */
5132 bnx2x_update_eq_prod(bp, bp->eq_prod);
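/* Sketch of the EQ index convention used above (names from this file):
 *
 *	hw_cons: 1..255, 257, ...   (firmware skips the next-page element)
 *	sw_cons: 0..254, 256, ...   (driver-side index, one element behind)
 *
 * When hw_cons lands exactly on a next-page element, i.e.
 * (hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE, it is advanced by one
 * so the "sw_cons != hw_cons" loop condition can still be met, matching
 * NEXT_EQ_IDX() which skips those elements on the software side.
 */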
5135 static void bnx2x_sp_task(struct work_struct *work)
5137 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5139 DP(BNX2X_MSG_SP, "sp task invoked\n");
5141 /* make sure the atomic interrupt_occurred has been written */
5143 if (atomic_read(&bp->interrupt_occurred)) {
5145 /* what work needs to be performed? */
5146 u16 status = bnx2x_update_dsb_idx(bp);
5148 DP(BNX2X_MSG_SP, "status %x\n", status);
5149 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5150 atomic_set(&bp->interrupt_occurred, 0);
5153 if (status & BNX2X_DEF_SB_ATT_IDX) {
5155 status &= ~BNX2X_DEF_SB_ATT_IDX;
5158 /* SP events: STAT_QUERY and others */
5159 if (status & BNX2X_DEF_SB_IDX) {
5160 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5162 if (FCOE_INIT(bp) &&
5163 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5164 /* Prevent local bottom-halves from running as
5165 * we are going to change the local NAPI list.
5168 napi_schedule(&bnx2x_fcoe(bp, napi));
5172 /* Handle EQ completions */
5174 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5175 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5177 status &= ~BNX2X_DEF_SB_IDX;
5180 /* if status is non-zero then perhaps something went wrong */
5181 if (unlikely(status))
5183 "got an unknown interrupt! (status 0x%x)\n", status);
5185 /* ack status block only if something was actually handled */
5186 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5187 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5191 /* must be called after the EQ processing (since eq leads to sriov
5192 * ramrod completion flows).
5193 * This flow may have been scheduled by the arrival of a ramrod
5194 * completion, or by the sriov code rescheduling itself.
5196 bnx2x_iov_sp_task(bp);
5198 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5199 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5201 bnx2x_link_report(bp);
5202 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5206 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5208 struct net_device *dev = dev_instance;
5209 struct bnx2x *bp = netdev_priv(dev);
5211 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5212 IGU_INT_DISABLE, 0);
5214 #ifdef BNX2X_STOP_ON_ERROR
5215 if (unlikely(bp->panic))
5219 if (CNIC_LOADED(bp)) {
5220 struct cnic_ops *c_ops;
5223 c_ops = rcu_dereference(bp->cnic_ops);
5225 c_ops->cnic_handler(bp->cnic_data, NULL);
5229 /* schedule sp task to perform default status block work, ack
5230 * attentions and enable interrupts.
5232 bnx2x_schedule_sp_task(bp);
5237 /* end of slow path */
5240 void bnx2x_drv_pulse(struct bnx2x *bp)
5242 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5243 bp->fw_drv_pulse_wr_seq);
5247 static void bnx2x_timer(unsigned long data)
5249 struct bnx2x *bp = (struct bnx2x *) data;
5251 if (!netif_running(bp->dev))
5254 if (!BP_NOMCP(bp)) {
5255 int mb_idx = BP_FW_MB_IDX(bp);
5259 ++bp->fw_drv_pulse_wr_seq;
5260 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5261 /* TBD - add SYSTEM_TIME */
5262 drv_pulse = bp->fw_drv_pulse_wr_seq;
5263 bnx2x_drv_pulse(bp);
5265 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5266 MCP_PULSE_SEQ_MASK);
5267 /* The delta between driver pulse and mcp response
5268 * should be 1 (before mcp response) or 0 (after mcp response)
5270 if ((drv_pulse != mcp_pulse) &&
5271 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5272 /* someone lost a heartbeat... */
5273 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5274 drv_pulse, mcp_pulse);
5278 if (bp->state == BNX2X_STATE_OPEN)
5279 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5281 mod_timer(&bp->timer, jiffies + bp->current_interval);
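/* Sketch of the heartbeat check above, assuming an 8-bit
 * DRV_PULSE_SEQ_MASK of 0xff (hypothetical width, for illustration):
 *
 *	drv_pulse = 0x12, mcp_pulse = 0x12  ->  OK (MCP already answered)
 *	drv_pulse = 0x13, mcp_pulse = 0x12  ->  OK (MCP one step behind)
 *	drv_pulse = 0x15, mcp_pulse = 0x12  ->  heartbeat lost, error logged
 *
 * The "+ 1" comparison is done modulo the mask, so the sequence may wrap.
 */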
5284 /* end of Statistics */
5289 * nic init service functions
5292 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5295 if (!(len%4) && !(addr%4))
5296 for (i = 0; i < len; i += 4)
5297 REG_WR(bp, addr + i, fill);
5299 for (i = 0; i < len; i++)
5300 REG_WR8(bp, addr + i, fill);
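/* bnx2x_fill() picks the access width from alignment: a dword-aligned
 * address and length go out as 32-bit REG_WR() writes, anything else falls
 * back to byte-wide REG_WR8(). For example (hypothetical addresses):
 * addr=0x1000, len=16 -> four REG_WR(); addr=0x1001, len=3 -> three
 * REG_WR8().
 */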
5304 /* helper: writes FP SP data to FW - data_size in dwords */
5305 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5311 for (index = 0; index < data_size; index++)
5312 REG_WR(bp, BAR_CSTRORM_INTMEM +
5313 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5315 *(sb_data_p + index));
5318 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5322 struct hc_status_block_data_e2 sb_data_e2;
5323 struct hc_status_block_data_e1x sb_data_e1x;
5325 /* disable the function first */
5326 if (!CHIP_IS_E1x(bp)) {
5327 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5328 sb_data_e2.common.state = SB_DISABLED;
5329 sb_data_e2.common.p_func.vf_valid = false;
5330 sb_data_p = (u32 *)&sb_data_e2;
5331 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5333 memset(&sb_data_e1x, 0,
5334 sizeof(struct hc_status_block_data_e1x));
5335 sb_data_e1x.common.state = SB_DISABLED;
5336 sb_data_e1x.common.p_func.vf_valid = false;
5337 sb_data_p = (u32 *)&sb_data_e1x;
5338 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5340 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5342 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5343 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5344 CSTORM_STATUS_BLOCK_SIZE);
5345 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5346 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5347 CSTORM_SYNC_BLOCK_SIZE);
5350 /* helper: writes SP SB data to FW */
5351 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5352 struct hc_sp_status_block_data *sp_sb_data)
5354 int func = BP_FUNC(bp);
5356 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5357 REG_WR(bp, BAR_CSTRORM_INTMEM +
5358 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5360 *((u32 *)sp_sb_data + i));
5363 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5365 int func = BP_FUNC(bp);
5366 struct hc_sp_status_block_data sp_sb_data;
5367 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5369 sp_sb_data.state = SB_DISABLED;
5370 sp_sb_data.p_func.vf_valid = false;
5372 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5374 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5375 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5376 CSTORM_SP_STATUS_BLOCK_SIZE);
5377 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5378 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5379 CSTORM_SP_SYNC_BLOCK_SIZE);
5384 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5385 int igu_sb_id, int igu_seg_id)
5387 hc_sm->igu_sb_id = igu_sb_id;
5388 hc_sm->igu_seg_id = igu_seg_id;
5389 hc_sm->timer_value = 0xFF;
5390 hc_sm->time_to_expire = 0xFFFFFFFF;
5394 /* maps status block indices to their state machine ids. */
5395 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5397 /* zero out state machine indices */
5399 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5402 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5403 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5404 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5405 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5409 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5410 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5413 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5414 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5415 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5416 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5417 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5418 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5419 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5420 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5423 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5424 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5428 struct hc_status_block_data_e2 sb_data_e2;
5429 struct hc_status_block_data_e1x sb_data_e1x;
5430 struct hc_status_block_sm *hc_sm_p;
5434 if (CHIP_INT_MODE_IS_BC(bp))
5435 igu_seg_id = HC_SEG_ACCESS_NORM;
5437 igu_seg_id = IGU_SEG_ACCESS_NORM;
5439 bnx2x_zero_fp_sb(bp, fw_sb_id);
5441 if (!CHIP_IS_E1x(bp)) {
5442 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5443 sb_data_e2.common.state = SB_ENABLED;
5444 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5445 sb_data_e2.common.p_func.vf_id = vfid;
5446 sb_data_e2.common.p_func.vf_valid = vf_valid;
5447 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5448 sb_data_e2.common.same_igu_sb_1b = true;
5449 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5450 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5451 hc_sm_p = sb_data_e2.common.state_machine;
5452 sb_data_p = (u32 *)&sb_data_e2;
5453 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5454 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5456 memset(&sb_data_e1x, 0,
5457 sizeof(struct hc_status_block_data_e1x));
5458 sb_data_e1x.common.state = SB_ENABLED;
5459 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5460 sb_data_e1x.common.p_func.vf_id = 0xff;
5461 sb_data_e1x.common.p_func.vf_valid = false;
5462 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5463 sb_data_e1x.common.same_igu_sb_1b = true;
5464 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5465 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5466 hc_sm_p = sb_data_e1x.common.state_machine;
5467 sb_data_p = (u32 *)&sb_data_e1x;
5468 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5469 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5472 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5473 igu_sb_id, igu_seg_id);
5474 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5475 igu_sb_id, igu_seg_id);
5477 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5479 /* write indices to HW */
5480 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5483 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5484 u16 tx_usec, u16 rx_usec)
5486 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5488 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5489 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5491 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5492 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5494 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5495 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5499 static void bnx2x_init_def_sb(struct bnx2x *bp)
5501 struct host_sp_status_block *def_sb = bp->def_status_blk;
5502 dma_addr_t mapping = bp->def_status_blk_mapping;
5503 int igu_sp_sb_index;
5505 int port = BP_PORT(bp);
5506 int func = BP_FUNC(bp);
5507 int reg_offset, reg_offset_en5;
5510 struct hc_sp_status_block_data sp_sb_data;
5511 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5513 if (CHIP_INT_MODE_IS_BC(bp)) {
5514 igu_sp_sb_index = DEF_SB_IGU_ID;
5515 igu_seg_id = HC_SEG_ACCESS_DEF;
5517 igu_sp_sb_index = bp->igu_dsb_id;
5518 igu_seg_id = IGU_SEG_ACCESS_DEF;
5522 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5523 atten_status_block);
5524 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5528 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5529 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5530 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5531 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5532 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5534 /* take care of sig[0]..sig[4] */
5535 for (sindex = 0; sindex < 4; sindex++)
5536 bp->attn_group[index].sig[sindex] =
5537 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5539 if (!CHIP_IS_E1x(bp))
5541 * enable5 is separate from the rest of the registers,
5542 * and therefore the address skip is 4
5543 * and not 16 between the different groups
5545 bp->attn_group[index].sig[4] = REG_RD(bp,
5546 reg_offset_en5 + 0x4*index);
5548 bp->attn_group[index].sig[4] = 0;
5551 if (bp->common.int_block == INT_BLOCK_HC) {
5552 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5553 HC_REG_ATTN_MSG0_ADDR_L);
5555 REG_WR(bp, reg_offset, U64_LO(section));
5556 REG_WR(bp, reg_offset + 4, U64_HI(section));
5557 } else if (!CHIP_IS_E1x(bp)) {
5558 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5559 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5562 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5565 bnx2x_zero_sp_sb(bp);
5567 sp_sb_data.state = SB_ENABLED;
5568 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5569 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5570 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5571 sp_sb_data.igu_seg_id = igu_seg_id;
5572 sp_sb_data.p_func.pf_id = func;
5573 sp_sb_data.p_func.vnic_id = BP_VN(bp);
5574 sp_sb_data.p_func.vf_id = 0xff;
5576 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5578 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5581 void bnx2x_update_coalesce(struct bnx2x *bp)
5585 for_each_eth_queue(bp, i)
5586 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5587 bp->tx_ticks, bp->rx_ticks);
5590 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5592 spin_lock_init(&bp->spq_lock);
5593 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5595 bp->spq_prod_idx = 0;
5596 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5597 bp->spq_prod_bd = bp->spq;
5598 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5601 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5604 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5605 union event_ring_elem *elem =
5606 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5608 elem->next_page.addr.hi =
5609 cpu_to_le32(U64_HI(bp->eq_mapping +
5610 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5611 elem->next_page.addr.lo =
5612 cpu_to_le32(U64_LO(bp->eq_mapping +
5613 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5616 bp->eq_prod = NUM_EQ_DESC;
5617 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5618 /* we want a warning message before it gets rough... */
5619 atomic_set(&bp->eq_spq_left,
5620 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
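/* Sketch of the next-page chaining done above, for a hypothetical
 * NUM_EQ_PAGES of 2: the last element of each page points at the start of
 * the following page, modulo the page count, so the ring closes on itself:
 *
 *	page 0, last elem -> eq_mapping + BCM_PAGE_SIZE * 1
 *	page 1, last elem -> eq_mapping + BCM_PAGE_SIZE * 0
 */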
5624 /* called with netif_addr_lock_bh() */
5625 void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5626 unsigned long rx_mode_flags,
5627 unsigned long rx_accept_flags,
5628 unsigned long tx_accept_flags,
5629 unsigned long ramrod_flags)
5631 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5634 memset(&ramrod_param, 0, sizeof(ramrod_param));
5636 /* Prepare ramrod parameters */
5637 ramrod_param.cid = 0;
5638 ramrod_param.cl_id = cl_id;
5639 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5640 ramrod_param.func_id = BP_FUNC(bp);
5642 ramrod_param.pstate = &bp->sp_state;
5643 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5645 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5646 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5648 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5650 ramrod_param.ramrod_flags = ramrod_flags;
5651 ramrod_param.rx_mode_flags = rx_mode_flags;
5653 ramrod_param.rx_accept_flags = rx_accept_flags;
5654 ramrod_param.tx_accept_flags = tx_accept_flags;
5656 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5658 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5663 /* called with netif_addr_lock_bh() */
5664 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5666 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5667 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5671 /* Configure rx_mode of FCoE Queue */
5672 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5674 switch (bp->rx_mode) {
5675 case BNX2X_RX_MODE_NONE:
5677 * 'drop all' supersedes any accept flags that may have been
5678 * passed to the function.
5681 case BNX2X_RX_MODE_NORMAL:
5682 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5683 __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
5684 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5686 /* internal switching mode */
5687 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5688 __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
5689 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5692 case BNX2X_RX_MODE_ALLMULTI:
5693 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5694 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5695 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5697 /* internal switching mode */
5698 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5699 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5700 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5703 case BNX2X_RX_MODE_PROMISC:
5704 /* According to the definition of SI mode, an iface in promisc mode
5705 * should receive matched and unmatched (in resolution of port)
5708 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
5709 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5710 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5711 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5713 /* internal switching mode */
5714 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5715 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5718 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
5720 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5724 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
5728 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5729 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
5730 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
5733 __set_bit(RAMROD_RX, &ramrod_flags);
5734 __set_bit(RAMROD_TX, &ramrod_flags);
5736 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
5737 tx_accept_flags, ramrod_flags);
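/* Summary of the rx accept flags composed above, per rx_mode (ANY_VLAN is
 * additionally set on both rx and tx for every mode except NONE):
 *
 *	NONE     -> (none; "drop all" supersedes everything)
 *	NORMAL   -> UNICAST | MULTICAST | BROADCAST
 *	ALLMULTI -> UNICAST | ALL_MULTICAST | BROADCAST
 *	PROMISC  -> UNMATCHED | UNICAST | ALL_MULTICAST | BROADCAST
 */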
5740 static void bnx2x_init_internal_common(struct bnx2x *bp)
5746 * In switch independent mode, the TSTORM needs to accept
5747 * packets that failed classification, since approximate match
5748 * mac addresses aren't written to NIG LLH
5750 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5751 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
5752 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5753 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5754 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
5756 /* Zero this manually as its initialization is
5757 currently missing in the initTool */
5758 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5759 REG_WR(bp, BAR_USTRORM_INTMEM +
5760 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5761 if (!CHIP_IS_E1x(bp)) {
5762 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5763 CHIP_INT_MODE_IS_BC(bp) ?
5764 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5768 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5770 switch (load_code) {
5771 case FW_MSG_CODE_DRV_LOAD_COMMON:
5772 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5773 bnx2x_init_internal_common(bp);
5776 case FW_MSG_CODE_DRV_LOAD_PORT:
5780 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5781 /* internal memory per function is
5782 initialized inside bnx2x_pf_init */
5786 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5791 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5793 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
5796 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5798 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
5801 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5803 if (CHIP_IS_E1x(fp->bp))
5804 return BP_L_ID(fp->bp) + fp->index;
5805 else /* We want Client ID to be the same as IGU SB ID for 57712 */
5806 return bnx2x_fp_igu_sb_id(fp);
5809 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5811 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5813 unsigned long q_type = 0;
5814 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5815 fp->rx_queue = fp_idx;
5817 fp->cl_id = bnx2x_fp_cl_id(fp);
5818 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5819 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
5820 /* qZone id equals the FW (per path) client id */
5821 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
5824 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5826 /* Set up SB indices */
5827 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5829 /* Configure Queue State object */
5830 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5831 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5833 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5836 for_each_cos_in_tx_queue(fp, cos) {
5837 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5838 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5839 FP_COS_TO_TXQ(fp, cos, bp),
5840 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5841 cids[cos] = fp->txdata_ptr[cos]->cid;
5844 /* nothing more for vf to do here */
5848 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5849 fp->fw_sb_id, fp->igu_sb_id);
5850 bnx2x_update_fpsb_idx(fp);
5851 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5852 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5853 bnx2x_sp_mapping(bp, q_rdata), q_type);
5856 * Configure classification DBs: Always enable Tx switching
5858 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5861 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5862 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5866 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5870 for (i = 1; i <= NUM_TX_RINGS; i++) {
5871 struct eth_tx_next_bd *tx_next_bd =
5872 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5874 tx_next_bd->addr_hi =
5875 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5876 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5877 tx_next_bd->addr_lo =
5878 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5879 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5882 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5883 txdata->tx_db.data.zero_fill1 = 0;
5884 txdata->tx_db.data.prod = 0;
5886 txdata->tx_pkt_prod = 0;
5887 txdata->tx_pkt_cons = 0;
5888 txdata->tx_bd_prod = 0;
5889 txdata->tx_bd_cons = 0;
5893 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5897 for_each_tx_queue_cnic(bp, i)
5898 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
5900 static void bnx2x_init_tx_rings(struct bnx2x *bp)
5905 for_each_eth_queue(bp, i)
5906 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5907 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5910 void bnx2x_nic_init_cnic(struct bnx2x *bp)
5913 bnx2x_init_fcoe_fp(bp);
5915 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
5916 BNX2X_VF_ID_INVALID, false,
5917 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5919 /* ensure status block indices were read */
5921 bnx2x_init_rx_rings_cnic(bp);
5922 bnx2x_init_tx_rings_cnic(bp);
5929 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5933 for_each_eth_queue(bp, i)
5934 bnx2x_init_eth_fp(bp, i);
5936 /* ensure status block indices were read */
5938 bnx2x_init_rx_rings(bp);
5939 bnx2x_init_tx_rings(bp);
5944 /* Initialize MOD_ABS interrupts */
5945 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5946 bp->common.shmem_base, bp->common.shmem2_base,
5949 bnx2x_init_def_sb(bp);
5950 bnx2x_update_dsb_idx(bp);
5951 bnx2x_init_sp_ring(bp);
5952 bnx2x_init_eq_ring(bp);
5953 bnx2x_init_internal(bp, load_code);
5955 bnx2x_stats_init(bp);
5957 /* flush all before enabling interrupts */
5961 bnx2x_int_enable(bp);
5963 /* Check for SPIO5 */
5964 bnx2x_attn_int_deasserted0(bp,
5965 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5966 AEU_INPUTS_ATTN_BITS_SPIO5);
5969 /* end of nic init */
5972 * gzip service functions
5975 static int bnx2x_gunzip_init(struct bnx2x *bp)
5977 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5978 &bp->gunzip_mapping, GFP_KERNEL);
5979 if (bp->gunzip_buf == NULL)
5982 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5983 if (bp->strm == NULL)
5986 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
5987 if (bp->strm->workspace == NULL)
5997 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5998 bp->gunzip_mapping);
5999 bp->gunzip_buf = NULL;
6002 BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
6006 static void bnx2x_gunzip_end(struct bnx2x *bp)
6009 vfree(bp->strm->workspace);
6014 if (bp->gunzip_buf) {
6015 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6016 bp->gunzip_mapping);
6017 bp->gunzip_buf = NULL;
6021 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6025 /* check gzip header */
6026 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6027 BNX2X_ERR("Bad gzip header\n");
6035 if (zbuf[3] & FNAME)
6036 while ((zbuf[n++] != 0) && (n < len));
6038 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6039 bp->strm->avail_in = len - n;
6040 bp->strm->next_out = bp->gunzip_buf;
6041 bp->strm->avail_out = FW_BUF_SIZE;
6043 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6047 rc = zlib_inflate(bp->strm, Z_FINISH);
6048 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6049 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6052 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6053 if (bp->gunzip_outlen & 0x3)
6055 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6057 bp->gunzip_outlen >>= 2;
6059 zlib_inflateEnd(bp->strm);
6061 if (rc == Z_STREAM_END)
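/* Sketch of the gzip framing handled above: the fixed 10-byte gzip header
 * is skipped, plus a NUL-terminated file name when the FNAME flag (bit 3
 * of zbuf[3]) is set; zlib_inflateInit2() is then called with -MAX_WBITS
 * so zlib expects a raw deflate stream with no wrapper. Hypothetical
 * layout:
 *
 *	1f 8b 08 08 <mtime:4> <xfl> <os> "fw.bin\0" <deflate data ...>
 *	         ^ FLG has FNAME set                ^ n ends up here
 */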
6067 /* nic load/unload */
6070 * General service functions
6073 /* send a NIG loopback debug packet */
6074 static void bnx2x_lb_pckt(struct bnx2x *bp)
6078 /* Ethernet source and destination addresses */
6079 wb_write[0] = 0x55555555;
6080 wb_write[1] = 0x55555555;
6081 wb_write[2] = 0x20; /* SOP */
6082 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6084 /* NON-IP protocol */
6085 wb_write[0] = 0x09000000;
6086 wb_write[1] = 0x55555555;
6087 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
6088 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6091 /* some of the internal memories
6092 * are not directly readable from the driver;
6093 * to test them we send debug packets
6095 static int bnx2x_int_mem_test(struct bnx2x *bp)
6101 if (CHIP_REV_IS_FPGA(bp))
6103 else if (CHIP_REV_IS_EMUL(bp))
6108 /* Disable inputs of parser neighbor blocks */
6109 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6110 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6111 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6112 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6114 /* Write 0 to parser credits for CFC search request */
6115 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6117 /* send Ethernet packet */
6120 /* TODO: do I reset NIG statistics? */
6121 /* Wait until NIG register shows 1 packet of size 0x10 */
6122 count = 1000 * factor;
6125 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6126 val = *bnx2x_sp(bp, wb_data[0]);
6134 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6138 /* Wait until PRS register shows 1 packet */
6139 count = 1000 * factor;
6141 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6149 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6153 /* Reset and init BRB, PRS */
6154 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6156 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6158 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6159 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6161 DP(NETIF_MSG_HW, "part2\n");
6163 /* Disable inputs of parser neighbor blocks */
6164 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6165 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6166 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6167 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6169 /* Write 0 to parser credits for CFC search request */
6170 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6172 /* send 10 Ethernet packets */
6173 for (i = 0; i < 10; i++)
6176 /* Wait until NIG register shows 10 + 1
6177 packets of size 11*0x10 = 0xb0 */
6178 count = 1000 * factor;
6181 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6182 val = *bnx2x_sp(bp, wb_data[0]);
6190 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6194 /* Wait until PRS register shows 2 packets */
6195 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6197 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6199 /* Write 1 to parser credits for CFC search request */
6200 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6202 /* Wait until PRS register shows 3 packets */
6203 msleep(10 * factor);
6204 /* Wait until NIG register shows 1 packet of size 0x10 */
6205 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6207 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6209 /* clear NIG EOP FIFO */
6210 for (i = 0; i < 11; i++)
6211 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6212 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6214 BNX2X_ERR("clear of NIG failed\n");
6218 /* Reset and init BRB, PRS, NIG */
6219 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6221 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6223 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6224 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6225 if (!CNIC_SUPPORT(bp))
6227 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6229 /* Enable inputs of parser neighbor blocks */
6230 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6231 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6232 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6233 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6235 DP(NETIF_MSG_HW, "done\n");
6240 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6244 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6245 if (!CHIP_IS_E1x(bp))
6246 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6248 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6249 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6250 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6252 * mask read length error interrupts in brb for parser
6253 * (parsing unit and 'checksum and crc' unit);
6254 * these errors are legal (PU reads fixed length and CAC can cause
6255 * a read length error on truncated packets)
6257 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6258 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6259 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6260 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6261 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6262 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6263 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6264 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6265 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6266 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6267 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6268 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6269 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6270 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6271 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6272 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6273 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6274 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6275 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6277 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6278 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6279 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6280 if (!CHIP_IS_E1x(bp))
6281 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6282 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6283 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6285 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6286 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6287 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6288 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6290 if (!CHIP_IS_E1x(bp))
6291 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6292 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6294 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6295 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6296 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6297 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
6300 static void bnx2x_reset_common(struct bnx2x *bp)
6305 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6308 if (CHIP_IS_E3(bp)) {
6309 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6310 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6313 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6316 static void bnx2x_setup_dmae(struct bnx2x *bp)
6319 spin_lock_init(&bp->dmae_lock);
6322 static void bnx2x_init_pxp(struct bnx2x *bp)
6325 int r_order, w_order;
6327 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6328 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6329 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6331 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6333 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6337 bnx2x_init_pxp_arb(bp, r_order, w_order);
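/* Sketch of the PCI_EXP_DEVCTL decode above: Max_Payload_Size sits in bits
 * 7:5 and Max_Read_Request_Size in bits 14:12, each encoding a size of
 * 128 << value bytes. With a hypothetical devctl of 0x2830:
 *
 *	w_order = (0x2830 & PCI_EXP_DEVCTL_PAYLOAD) >> 5  = 1  (256 bytes)
 *	r_order = (0x2830 & PCI_EXP_DEVCTL_READRQ) >> 12  = 2  (512 bytes)
 */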
6340 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6350 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6351 SHARED_HW_CFG_FAN_FAILURE_MASK;
6353 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6357 * The fan failure mechanism is usually related to the PHY type since
6358 * the power consumption of the board is affected by the PHY. Currently,
6359 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6361 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6362 for (port = PORT_0; port < PORT_MAX; port++) {
6364 bnx2x_fan_failure_det_req(
6366 bp->common.shmem_base,
6367 bp->common.shmem2_base,
6371 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6373 if (is_required == 0)
6376 /* Fan failure is indicated by SPIO 5 */
6377 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6379 /* set to active low mode */
6380 val = REG_RD(bp, MISC_REG_SPIO_INT);
6381 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6382 REG_WR(bp, MISC_REG_SPIO_INT, val);
6384 /* enable interrupt to signal the IGU */
6385 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6386 val |= MISC_SPIO_SPIO5;
6387 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6390 void bnx2x_pf_disable(struct bnx2x *bp)
6392 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6393 val &= ~IGU_PF_CONF_FUNC_EN;
6395 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6396 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6397 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6400 static void bnx2x__common_init_phy(struct bnx2x *bp)
6402 u32 shmem_base[2], shmem2_base[2];
6403 /* Avoid common init in case MFW supports LFA */
6404 if (SHMEM2_RD(bp, size) >
6405 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6407 shmem_base[0] = bp->common.shmem_base;
6408 shmem2_base[0] = bp->common.shmem2_base;
6409 if (!CHIP_IS_E1x(bp)) {
6411 SHMEM2_RD(bp, other_shmem_base_addr);
6413 SHMEM2_RD(bp, other_shmem2_base_addr);
6415 bnx2x_acquire_phy_lock(bp);
6416 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6417 bp->common.chip_id);
6418 bnx2x_release_phy_lock(bp);
6422 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6424 * @bp: driver handle
6426 static int bnx2x_init_hw_common(struct bnx2x *bp)
6430 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
6433 * take the UNDI lock to protect undi_unload flow from accessing
6434 * registers while we're resetting the chip
6436 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6438 bnx2x_reset_common(bp);
6439 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6442 if (CHIP_IS_E3(bp)) {
6443 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6444 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6446 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6448 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6450 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
6452 if (!CHIP_IS_E1x(bp)) {
6456 * 4-port mode or 2-port mode we need to turn off master-enable
6457 * for everyone, after that, turn it back on for self.
6458 * so, we disregard multi-function or not, and always disable
6459 * for all functions on the given path, this means 0,2,4,6 for
6460 * path 0 and 1,3,5,7 for path 1
6462 for (abs_func_id = BP_PATH(bp);
6463 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6464 if (abs_func_id == BP_ABS_FUNC(bp)) {
6466 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6471 bnx2x_pretend_func(bp, abs_func_id);
6472 /* clear pf enable */
6473 bnx2x_pf_disable(bp);
6474 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6478 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
6479 if (CHIP_IS_E1(bp)) {
6480 /* enable HW interrupt from PXP on USDM overflow
6481 bit 16 on INT_MASK_0 */
6482 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6485 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6489 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6490 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6491 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6492 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6493 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6494 /* make sure this value is 0 */
6495 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6497 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6498 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6499 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6500 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6501 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6504 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6506 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6507 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6509 /* let the HW do its magic ... */
6511 /* finish PXP init */
6512 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6514 BNX2X_ERR("PXP2 CFG failed\n");
6517 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6519 BNX2X_ERR("PXP2 RD_INIT failed\n");
6523 /* Timers bug workaround, E2 only. We need to set the entire ILT to
6524 * have entries with value "0" and valid bit on.
6525 * This needs to be done by the first PF that is loaded in a path
6526 * (i.e. common phase)
6528 if (!CHIP_IS_E1x(bp)) {
6529 /* In E2 there is a bug in the timers block that can cause function 6 / 7
6530 * (i.e. vnic3) to start even if it is marked as "scan-off".
6531 * This occurs when a different function (func2,3) is being marked
6532 * as "scan-off". Real-life scenario for example: if a driver is being
6533 * load-unloaded while func6,7 are down. This will cause the timer to access
6534 * the ilt, translate to a logical address and send a request to read/write.
6535 * Since the ilt for the function that is down is not valid, this will cause
6536 * a translation error which is unrecoverable.
6537 * The workaround makes sure that when this happens nothing fatal
6538 * will occur. The workaround:
6539 * 1. First PF driver which loads on a path will:
6540 * a. After taking the chip out of reset, by using pretend,
6541 * it will write "0" to the following registers of
6543 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6544 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
6545 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
6546 * And for itself it will write '1' to
6547 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
6548 * dmae-operations (writing to pram for example.)
6549 * note: this could be done for functions 6,7 only, but it is cleaner this
6551 * b. Write zero+valid to the entire ILT.
6552 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
6553 * VNIC3 (of that port). The range allocated will be the
6554 * entire ILT. This is needed to prevent ILT range error.
6555 * 2. Any PF driver load flow:
6556 * a. ILT update with the physical addresses of the allocated
6558 * b. Wait 20msec. - note that this timeout is needed to make
6559 * sure there are no requests in one of the PXP internal
6560 * queues with "old" ILT addresses.
6561 * c. PF enable in the PGLC.
6562 * d. Clear the was_error of the PF in the PGLC. (could have
6563 * occurred while the driver was down)
6564 * e. PF enable in the CFC (WEAK + STRONG)
6565 * f. Timers scan enable
6566 * 3. PF driver unload flow:
6567 * a. Clear the Timers scan_en.
6568 * b. Polling for scan_on=0 for that PF.
6569 * c. Clear the PF enable bit in the PXP.
6570 * d. Clear the PF enable in the CFC (WEAK + STRONG)
6571 * e. Write zero+valid to all ILT entries (The valid bit must
6573 * f. If this is VNIC 3 of a port then also init
6574 * first_timers_ilt_entry to zero and last_timers_ilt_entry
6575 * to the last entry in the ILT.
6578 * Currently the PF error in the PGLC is non-recoverable.
6579 * In the future there will be a recovery routine for this error.
6580 * Currently attention is masked.
6581 * Having an MCP lock on the load/unload process does not guarantee that
6582 * there is no Timer disable during Func6/7 enable. This is because the
6583 * Timers scan is currently being cleared by the MCP on FLR.
6584 * Step 2.d can be done only for PF6/7 and the driver can also check if
6585 * there is an error before clearing it. But the flow above is simpler and
6587 * All ILT entries are written by zero+valid and not just PF6/7
6588 * ILT entries since in the future the ILT entries allocation for
6589 * PF-s might be dynamic.
6591 struct ilt_client_info ilt_cli;
6592 struct bnx2x_ilt ilt;
6593 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6594 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6596 /* initialize dummy TM client */
6598 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6599 ilt_cli.client_num = ILT_CLIENT_TM;
6601 /* Step 1: set zeroes to all ilt page entries with valid bit on
6602 * Step 2: set the timers first/last ilt entry to point
6603 * to the entire range to prevent ILT range error for 3rd/4th
6604 * vnic (this code assumes existence of the vnic)
6606 * both steps performed by call to bnx2x_ilt_client_init_op()
6607 * with dummy TM client
6609 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
6610 * and its sibling are split registers
6612 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6613 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6614 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
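/* Illustration (not part of the driver): split (per-function) registers
 * such as PXP2_REG_RQ_<blk>_FIRST_ILT decode according to the pretended
 * function, so accesses on behalf of another function always follow the
 * sandwich used above:
 *
 *	bnx2x_pretend_func(bp, target_abs_func);
 *	... program the split register(s) ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));	// restore ourselves
 */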
6616 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6617 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6618 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6622 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6623 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6625 if (!CHIP_IS_E1x(bp)) {
6626 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6627 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6628 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6630 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6632 /* let the HW do its magic ... */
6635 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6636 } while (factor-- && (val != 1));
6639 BNX2X_ERR("ATC_INIT failed\n");
6644 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6646 bnx2x_iov_init_dmae(bp);
6648 /* clean the DMAE memory */
6650 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6652 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6654 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6656 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6658 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6660 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6661 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6662 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6663 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6665 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6668 /* QM queues pointers table */
6669 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6671 /* soft reset pulse */
6672 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6673 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6675 if (CNIC_SUPPORT(bp))
6676 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6678 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6679 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
6680 if (!CHIP_REV_IS_SLOW(bp))
6681 /* enable hw interrupt from doorbell Q */
6682 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6684 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6686 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6687 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6689 if (!CHIP_IS_E1(bp))
6690 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6692 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6693 if (IS_MF_AFEX(bp)) {
6694 /* configure that VNTag and VLAN headers must be
6695 * received in afex mode
6697 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6698 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6699 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6700 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6701 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6703 /* Bit-map indicating which L2 hdrs may appear
6704 * after the basic Ethernet header
6706 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6707 bp->path_has_ovlan ? 7 : 6);
6711 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6712 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6713 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6714 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
6716 if (!CHIP_IS_E1x(bp)) {
6717 /* reset VFC memories */
6718 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6719 VFC_MEMORIES_RST_REG_CAM_RST |
6720 VFC_MEMORIES_RST_REG_RAM_RST);
6721 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6722 VFC_MEMORIES_RST_REG_CAM_RST |
6723 VFC_MEMORIES_RST_REG_RAM_RST);
6728 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6729 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6730 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6731 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
6734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6739 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6740 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6741 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6743 if (!CHIP_IS_E1x(bp)) {
6744 if (IS_MF_AFEX(bp)) {
6745 /* configure that VNTag and VLAN headers must be
6748 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6749 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6750 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6751 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6752 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6754 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6755 bp->path_has_ovlan ? 7 : 6);
6759 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6761 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6763 if (CNIC_SUPPORT(bp)) {
6764 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6765 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6766 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6767 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6768 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6769 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6770 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6771 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6772 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6773 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6775 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6777 if (sizeof(union cdu_context) != 1024)
6778 /* we currently assume that a context is 1024 bytes */
6779 dev_alert(&bp->pdev->dev,
6780 "please adjust the size of cdu_context(%ld)\n",
6781 (long)sizeof(union cdu_context));
6783 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
6784 val = (4 << 24) + (0 << 12) + 1024;
6785 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
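/* Illustration (not part of the driver): the value written above packs
 * three fields into a single dword:
 *	(4 << 24) + (0 << 12) + 1024 == 0x04000400
 * The 1024 in the low field matches the sizeof(union cdu_context) checked
 * just above; the meanings of the individual fields are register-specific
 * and any names for them here would be assumptions.
 */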
6787 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
6788 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6789 /* enable context validation interrupt from CFC */
6790 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6792 /* set the thresholds to prevent CFC/CDU race */
6793 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6795 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
6797 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
6798 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
6800 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
6801 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
6803 /* Reset PCIE errors for debug */
6804 REG_WR(bp, 0x2814, 0xffffffff);
6805 REG_WR(bp, 0x3820, 0xffffffff);
6807 if (!CHIP_IS_E1x(bp)) {
6808 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
6809 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
6810 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
6811 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
6812 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
6813 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
6814 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
6815 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
6816 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
6817 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
6818 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
6821 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
6822 if (!CHIP_IS_E1(bp)) {
6823 /* in E3 this is done in the per-port section */
6824 if (!CHIP_IS_E3(bp))
6825 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
6827 if (CHIP_IS_E1H(bp))
6828 /* not applicable for E2 (and above ...) */
6829 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
6831 if (CHIP_REV_IS_SLOW(bp))
6834 /* finish CFC init */
6835 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6837 BNX2X_ERR("CFC LL_INIT failed\n");
6840 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6842 BNX2X_ERR("CFC AC_INIT failed\n");
6845 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6847 BNX2X_ERR("CFC CAM_INIT failed\n");
6850 REG_WR(bp, CFC_REG_DEBUG0, 0);
6852 if (CHIP_IS_E1(bp)) {
6853 /* read NIG statistic
6854 to see if this is our first up since powerup */
6855 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6856 val = *bnx2x_sp(bp, wb_data[0]);
6858 /* do internal memory self test */
6859 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6860 BNX2X_ERR("internal mem self test failed\n");
6865 bnx2x_setup_fan_failure_detection(bp);
6867 /* clear PXP2 attentions */
6868 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6870 bnx2x_enable_blocks_attention(bp);
6871 bnx2x_enable_blocks_parity(bp);
6873 if (!BP_NOMCP(bp)) {
6874 if (CHIP_IS_E1x(bp))
6875 bnx2x__common_init_phy(bp);
6877 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6883 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
6885 * @bp: driver handle
6887 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
6889 int rc = bnx2x_init_hw_common(bp);
6894 /* In E2 2-PORT mode, same ext phy is used for the two paths */
6896 bnx2x__common_init_phy(bp);
6901 static int bnx2x_init_hw_port(struct bnx2x *bp)
6903 int port = BP_PORT(bp);
6904 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
6909 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
6911 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6913 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
6914 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
6915 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
6917 /* Timers bug workaround: the common phase disables the pf_master bit
6918 * in PGLUE; we need to enable it here before any DMAE accesses are
6919 * attempted. Therefore we manually add the enable-master to the
6920 * port phase (it also happens in the function phase)
6922 if (!CHIP_IS_E1x(bp))
6923 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
6925 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
6926 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
6927 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
6928 bnx2x_init_block(bp, BLOCK_QM, init_phase);
6930 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
6931 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
6932 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
6933 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
6935 /* QM cid (connection) count */
6936 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
6938 if (CNIC_SUPPORT(bp)) {
6939 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6940 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6941 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6944 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6946 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6948 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
6951 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6952 else if (bp->dev->mtu > 4096) {
6953 if (bp->flags & ONE_PORT_FLAG)
6957 /* (24*1024 + val*4)/256 */
6958 low = 96 + (val/64) +
6959 ((val % 64) ? 1 : 0);
6962 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6963 high = low + 56; /* 14*1024/256 */
6964 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6965 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
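/* Illustration (not part of the driver): the large-MTU branch above
 * computes low = ceil((24*1024 + mtu*4)/256) in 256-byte BRB blocks,
 * i.e. 96 + DIV_ROUND_UP(mtu, 64). For example, a 9000-byte MTU gives
 * 96 + 141 = 237 blocks, and high = 237 + 56 = 293. (Worked example
 * only; the MTU value is hypothetical.)
 */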
6968 if (CHIP_MODE_IS_4_PORT(bp))
6969 REG_WR(bp, (BP_PORT(bp) ?
6970 BRB1_REG_MAC_GUARANTIED_1 :
6971 BRB1_REG_MAC_GUARANTIED_0), 40);
6974 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6975 if (CHIP_IS_E3B0(bp)) {
6976 if (IS_MF_AFEX(bp)) {
6977 /* configure headers for AFEX mode */
6978 REG_WR(bp, BP_PORT(bp) ?
6979 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6980 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
6981 REG_WR(bp, BP_PORT(bp) ?
6982 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
6983 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
6984 REG_WR(bp, BP_PORT(bp) ?
6985 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
6986 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
6988 /* Ovlan exists only if we are in multi-function +
6989 * switch-dependent mode; in switch-independent mode there
6990 * are no ovlan headers
6992 REG_WR(bp, BP_PORT(bp) ?
6993 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6994 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6995 (bp->path_has_ovlan ? 7 : 6));
6999 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7000 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7001 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7002 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7004 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7005 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7006 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7007 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7009 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7010 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7012 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7014 if (CHIP_IS_E1x(bp)) {
7015 /* configure PBF to work without PAUSE mtu 9000 */
7016 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7018 /* update threshold */
7019 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7020 /* update init credit */
7021 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7024 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7026 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7029 if (CNIC_SUPPORT(bp))
7030 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7032 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7033 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7035 if (CHIP_IS_E1(bp)) {
7036 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7037 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7039 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7041 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7043 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7044 /* init aeu_mask_attn_func_0/1:
7045 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
7046 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
7047 * bits 4-7 are used for "per vn group attention" */
7048 val = IS_MF(bp) ? 0xF7 : 0x7;
7049 /* Enable DCBX attention for all but E1 */
7050 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7051 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7053 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7055 if (!CHIP_IS_E1x(bp)) {
7056 /* Bit-map indicating which L2 hdrs may appear after the
7057 * basic Ethernet header
7060 REG_WR(bp, BP_PORT(bp) ?
7061 NIG_REG_P1_HDRS_AFTER_BASIC :
7062 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7064 REG_WR(bp, BP_PORT(bp) ?
7065 NIG_REG_P1_HDRS_AFTER_BASIC :
7066 NIG_REG_P0_HDRS_AFTER_BASIC,
7067 IS_MF_SD(bp) ? 7 : 6);
7070 REG_WR(bp, BP_PORT(bp) ?
7071 NIG_REG_LLH1_MF_MODE :
7072 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7074 if (!CHIP_IS_E3(bp))
7075 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7077 if (!CHIP_IS_E1(bp)) {
7078 /* 0x2 disable mf_ov, 0x1 enable */
7079 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7080 (IS_MF_SD(bp) ? 0x1 : 0x2));
7082 if (!CHIP_IS_E1x(bp)) {
7084 switch (bp->mf_mode) {
7085 case MULTI_FUNCTION_SD:
7088 case MULTI_FUNCTION_SI:
7089 case MULTI_FUNCTION_AFEX:
7094 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7095 NIG_REG_LLH0_CLS_TYPE), val);
7098 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7099 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7100 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7105 /* If SPIO5 is set to generate interrupts, enable it for this port */
7106 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7107 if (val & MISC_SPIO_SPIO5) {
7108 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7109 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7110 val = REG_RD(bp, reg_addr);
7111 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7112 REG_WR(bp, reg_addr, val);
7118 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7124 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7126 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7128 wb_write[0] = ONCHIP_ADDR1(addr);
7129 wb_write[1] = ONCHIP_ADDR2(addr);
7130 REG_WR_DMAE(bp, reg, wb_write, 2);
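/*
 * Illustration (not part of the driver): an ILT entry is a 64-bit quantity
 * carried as two 32-bit words over the wide bus, as in bnx2x_ilt_wr()
 * above (where the ONCHIP_ADDR1/2 macros apply the ILT-specific encoding).
 * A hedged generic sketch of a plain low/high split, using the kernel's
 * lower/upper_32_bits() helpers; the helper name is hypothetical:
 */
static inline void bnx2x_addr_to_wb(dma_addr_t addr, u32 wb[2])
{
	wb[0] = lower_32_bits(addr);	/* low dword goes out first */
	wb[1] = upper_32_bits(addr);	/* then the high dword */
}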
7133 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7135 u32 data, ctl, cnt = 100;
7136 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7137 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7138 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7139 u32 sb_bit = 1 << (idu_sb_id%32);
7140 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7141 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7143 /* Not supported in BC mode */
7144 if (CHIP_INT_MODE_IS_BC(bp))
7147 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7148 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7149 IGU_REGULAR_CLEANUP_SET |
7150 IGU_REGULAR_BCLEANUP;
7152 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7153 func_encode << IGU_CTRL_REG_FID_SHIFT |
7154 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7156 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7157 data, igu_addr_data);
7158 REG_WR(bp, igu_addr_data, data);
7161 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7163 REG_WR(bp, igu_addr_ctl, ctl);
7167 /* wait for clean up to finish */
7168 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7172 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7174 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7175 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7179 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7181 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7184 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7186 u32 i, base = FUNC_ILT_BASE(func);
7187 for (i = base; i < base + ILT_PER_FUNC; i++)
7188 bnx2x_ilt_wr(bp, i, 0);
7192 static void bnx2x_init_searcher(struct bnx2x *bp)
7194 int port = BP_PORT(bp);
7195 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7196 /* T1 hash bits value determines the T1 number of entries */
7197 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7200 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7203 struct bnx2x_func_state_params func_params = {NULL};
7204 struct bnx2x_func_switch_update_params *switch_update_params =
7205 &func_params.params.switch_update;
7207 /* Prepare parameters for function state transitions */
7208 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7209 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7211 func_params.f_obj = &bp->func_obj;
7212 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7214 /* Function parameters */
7215 switch_update_params->suspend = suspend;
7217 rc = bnx2x_func_state_change(bp, &func_params);
7222 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7224 int rc, i, port = BP_PORT(bp);
7225 int vlan_en = 0, mac_en[NUM_MACS];
7228 /* Close input from network */
7229 if (bp->mf_mode == SINGLE_FUNCTION) {
7230 bnx2x_set_rx_filter(&bp->link_params, 0);
7232 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7233 NIG_REG_LLH0_FUNC_EN);
7234 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7235 NIG_REG_LLH0_FUNC_EN, 0);
7236 for (i = 0; i < NUM_MACS; i++) {
7237 mac_en[i] = REG_RD(bp, port ?
7238 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7240 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7242 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7244 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7248 /* Close BMC to host */
7249 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7250 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7252 /* Suspend Tx switching to the PF. Completion of this ramrod
7253 * further guarantees that all the packets of that PF / child
7254 * VFs in BRB were processed by the Parser, so it is safe to
7255 * change the NIC_MODE register.
7257 rc = bnx2x_func_switch_update(bp, 1);
7259 BNX2X_ERR("Can't suspend tx-switching!\n");
7263 /* Change NIC_MODE register */
7264 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7266 /* Open input from network */
7267 if (bp->mf_mode == SINGLE_FUNCTION) {
7268 bnx2x_set_rx_filter(&bp->link_params, 1);
7270 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7271 NIG_REG_LLH0_FUNC_EN, vlan_en);
7272 for (i = 0; i < NUM_MACS; i++) {
7273 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7275 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7280 /* Enable BMC to host */
7281 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7282 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7284 /* Resume Tx switching to the PF */
7285 rc = bnx2x_func_switch_update(bp, 0);
7287 BNX2X_ERR("Can't resume tx-switching!\n");
7291 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7295 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7299 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7301 if (CONFIGURE_NIC_MODE(bp)) {
7302 /* Configure searcher as part of function hw init */
7303 bnx2x_init_searcher(bp);
7305 /* Reset NIC mode */
7306 rc = bnx2x_reset_nic_mode(bp);
7308 BNX2X_ERR("Can't change NIC mode!\n");
7315 static int bnx2x_init_hw_func(struct bnx2x *bp)
7317 int port = BP_PORT(bp);
7318 int func = BP_FUNC(bp);
7319 int init_phase = PHASE_PF0 + func;
7320 struct bnx2x_ilt *ilt = BP_ILT(bp);
7323 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7324 int i, main_mem_width, rc;
7326 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7328 /* FLR cleanup */
7329 if (!CHIP_IS_E1x(bp)) {
7330 rc = bnx2x_pf_flr_clnup(bp);
7335 /* set MSI reconfigure capability */
7336 if (bp->common.int_block == INT_BLOCK_HC) {
7337 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7338 val = REG_RD(bp, addr);
7339 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7340 REG_WR(bp, addr, val);
7343 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7344 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7347 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7350 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7351 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7353 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
7354 * those of the VFs, so start line should be reset
7356 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7357 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7358 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7359 ilt->lines[cdu_ilt_start + i].page_mapping =
7360 bp->context[i].cxt_mapping;
7361 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7364 bnx2x_ilt_init_op(bp, INITOP_SET);
7366 if (!CONFIGURE_NIC_MODE(bp)) {
7367 bnx2x_init_searcher(bp);
7368 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7369 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7372 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7373 DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
7377 if (!CHIP_IS_E1x(bp)) {
7378 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7380 /* Turn on a single ISR mode in IGU if driver is going to use
7383 if (!(bp->flags & USING_MSIX_FLAG))
7384 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7386 * Timers bug workaround: function init part.
7387 * We need to wait 20msec after initializing the ILT to make
7388 * sure there are no requests in one of the PXP internal
7389 * queues with "old" ILT addresses
7393 * Master enable - Due to WB DMAE writes performed before this
7394 * register is re-initialized as part of the regular function
7397 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7398 /* Enable the function in IGU */
7399 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7404 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7406 if (!CHIP_IS_E1x(bp))
7407 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7409 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7410 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7411 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7412 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7413 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7414 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7415 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7416 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7417 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7418 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7419 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7420 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7421 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7423 if (!CHIP_IS_E1x(bp))
7424 REG_WR(bp, QM_REG_PF_EN, 1);
7426 if (!CHIP_IS_E1x(bp)) {
7427 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7428 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7429 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7430 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7432 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7434 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7435 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7437 bnx2x_iov_init_dq(bp);
7439 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7440 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7441 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7442 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7443 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7444 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7445 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7446 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7447 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7448 if (!CHIP_IS_E1x(bp))
7449 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7451 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7453 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7455 if (!CHIP_IS_E1x(bp))
7456 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7459 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7460 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
7463 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7465 /* HC init per function */
7466 if (bp->common.int_block == INT_BLOCK_HC) {
7467 if (CHIP_IS_E1H(bp)) {
7468 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7470 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7471 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7473 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7476 int num_segs, sb_idx, prod_offset;
7478 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7480 if (!CHIP_IS_E1x(bp)) {
7481 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7482 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7485 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7487 if (!CHIP_IS_E1x(bp)) {
7491 * E2 mode: address 0-135 match to the mapping memory;
7492 * 136 - PF0 default prod; 137 - PF1 default prod;
7493 * 138 - PF2 default prod; 139 - PF3 default prod;
7494 * 140 - PF0 attn prod; 141 - PF1 attn prod;
7495 * 142 - PF2 attn prod; 143 - PF3 attn prod;
7498 * E1.5 mode - In backward compatible mode;
7499 * for non default SB; each even line in the memory
7500 * holds the U producer and each odd line holds
7501 * the C producer. The first 128 producers are for
7502 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
7503 * producers are for the DSB for each PF.
7504 * Each PF has five segments: (the order inside each
7505 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
7506 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
7507 * 144-147 attn prods;
7509 /* non-default-status-blocks */
7510 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7511 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7512 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7513 prod_offset = (bp->igu_base_sb + sb_idx) *
7516 for (i = 0; i < num_segs; i++) {
7517 addr = IGU_REG_PROD_CONS_MEMORY +
7518 (prod_offset + i) * 4;
7519 REG_WR(bp, addr, 0);
7521 /* send consumer update with value 0 */
7522 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7523 USTORM_ID, 0, IGU_INT_NOP, 1);
7524 bnx2x_igu_clear_sb(bp,
7525 bp->igu_base_sb + sb_idx);
7528 /* default-status-blocks */
7529 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7530 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7532 if (CHIP_MODE_IS_4_PORT(bp))
7533 dsb_idx = BP_FUNC(bp);
7535 dsb_idx = BP_VN(bp);
7537 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7538 IGU_BC_BASE_DSB_PROD + dsb_idx :
7539 IGU_NORM_BASE_DSB_PROD + dsb_idx);
7542 * igu prods come in chunks of E1HVN_MAX (4) -
7543 * it doesn't matter what the current chip mode is
7545 for (i = 0; i < (num_segs * E1HVN_MAX);
7547 addr = IGU_REG_PROD_CONS_MEMORY +
7548 (prod_offset + i)*4;
7549 REG_WR(bp, addr, 0);
7551 /* send consumer update with 0 */
7552 if (CHIP_INT_MODE_IS_BC(bp)) {
7553 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7554 USTORM_ID, 0, IGU_INT_NOP, 1);
7555 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7556 CSTORM_ID, 0, IGU_INT_NOP, 1);
7557 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7558 XSTORM_ID, 0, IGU_INT_NOP, 1);
7559 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7560 TSTORM_ID, 0, IGU_INT_NOP, 1);
7561 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7562 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7564 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7565 USTORM_ID, 0, IGU_INT_NOP, 1);
7566 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7567 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7569 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7571 /* !!! these should become driver const once
7572 rf-tool supports split-68 const */
7573 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7574 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7575 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7576 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7577 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7578 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
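/* Illustration (not part of the driver): for the non-default SBs cleared
 * above, prod_offset = (igu_base_sb + sb_idx) * num_segs. E.g. with a
 * hypothetical igu_base_sb of 16, sb_idx 2 and num_segs 2, the producer
 * lines zeroed start at offset (16 + 2) * 2 = 36.
 */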
7582 /* Reset PCIE errors for debug */
7583 REG_WR(bp, 0x2114, 0xffffffff);
7584 REG_WR(bp, 0x2120, 0xffffffff);
7586 if (CHIP_IS_E1x(bp)) {
7587 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
7588 main_mem_base = HC_REG_MAIN_MEMORY +
7589 BP_PORT(bp) * (main_mem_size * 4);
7590 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7593 val = REG_RD(bp, main_mem_prty_clr);
7596 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7599 /* Clear "false" parity errors in MSI-X table */
7600 for (i = main_mem_base;
7601 i < main_mem_base + main_mem_size * 4;
7602 i += main_mem_width) {
7603 bnx2x_read_dmae(bp, i, main_mem_width / 4);
7604 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7605 i, main_mem_width / 4);
7607 /* Clear HC parity attention */
7608 REG_RD(bp, main_mem_prty_clr);
7611 #ifdef BNX2X_STOP_ON_ERROR
7612 /* Enable STORMs SP logging */
7613 REG_WR8(bp, BAR_USTRORM_INTMEM +
7614 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7615 REG_WR8(bp, BAR_TSTRORM_INTMEM +
7616 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7617 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7618 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7619 REG_WR8(bp, BAR_XSTRORM_INTMEM +
7620 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7623 bnx2x_phy_probe(&bp->link_params);
7629 void bnx2x_free_mem_cnic(struct bnx2x *bp)
7631 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7633 if (!CHIP_IS_E1x(bp))
7634 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7635 sizeof(struct host_hc_status_block_e2));
7637 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7638 sizeof(struct host_hc_status_block_e1x));
7640 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7643 void bnx2x_free_mem(struct bnx2x *bp)
7648 bnx2x_free_fp_mem(bp);
7649 /* end of fastpath */
7651 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7652 sizeof(struct host_sp_status_block));
7654 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7655 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7657 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7658 sizeof(struct bnx2x_slowpath));
7660 for (i = 0; i < L2_ILT_LINES(bp); i++)
7661 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7662 bp->context[i].size);
7663 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7665 BNX2X_FREE(bp->ilt->lines);
7667 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7669 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7670 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7673 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
7676 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
7678 /* number of queues for statistics is number of eth queues + FCoE */
7679 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
7681 /* Total number of FW statistics requests =
7682 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
7685 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
7688 /* Request is built from stats_query_header and an array of
7689 * stats_query_cmd_group each of which contains
7690 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
7691 * configured in the stats_query_header.
7693 num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
7694 (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
7696 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
7697 num_groups * sizeof(struct stats_query_cmd_group);
7699 /* Data for statistics requests + stats_counter
7701 * stats_counter holds per-STORM counters that are incremented
7702 * when STORM has finished with the current request.
7704 * memory for FCoE offloaded statistics is counted anyway,
7705 * even if it will not be sent.
7707 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
7708 sizeof(struct per_pf_stats) +
7709 sizeof(struct fcoe_statistics_params) +
7710 sizeof(struct per_queue_stats) * num_queue_stats +
7711 sizeof(struct stats_counter);
7713 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
7714 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7717 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
7718 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
7720 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
7721 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
7723 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
7724 bp->fw_stats_req_sz;
7728 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7729 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7730 BNX2X_ERR("Can't allocate memory\n");
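/*
 * Illustration (not part of the driver): the num_groups arithmetic above
 * is a ceiling division and could equally be written with the kernel
 * helper below. E.g. with a hypothetical STATS_QUERY_CMD_COUNT of 16,
 * 18 requests need 2 command groups. The helper name is hypothetical:
 */
static inline int bnx2x_stats_num_groups(int fw_stats_num)
{
	return DIV_ROUND_UP(fw_stats_num, STATS_QUERY_CMD_COUNT);
}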
7734 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7736 if (!CHIP_IS_E1x(bp))
7737 /* size = the status block + ramrod buffers */
7738 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7739 sizeof(struct host_hc_status_block_e2));
7741 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7742 &bp->cnic_sb_mapping,
7744 host_hc_status_block_e1x));
7746 if (CONFIGURE_NIC_MODE(bp))
7747 /* allocate searcher T2 table, as it wasn't allocated before */
7748 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7750 /* write address to which L5 should insert its values */
7751 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7752 &bp->slowpath->drv_info_to_mcp;
7754 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7760 bnx2x_free_mem_cnic(bp);
7761 BNX2X_ERR("Can't allocate memory\n");
7765 int bnx2x_alloc_mem(struct bnx2x *bp)
7767 int i, allocated, context_size;
7769 if (!CONFIGURE_NIC_MODE(bp))
7770 /* allocate searcher T2 table */
7771 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7773 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7774 sizeof(struct host_sp_status_block));
7776 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7777 sizeof(struct bnx2x_slowpath));
7779 /* Allocate memory for FW statistics */
7780 if (bnx2x_alloc_fw_stats_mem(bp))
7783 /* Allocate memory for CDU context:
7784 * This memory is allocated separately and not in the generic ILT
7785 * functions because CDU differs in a few aspects:
7786 * 1. There are multiple entities allocating memory for context -
7787 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7788 * its own ILT lines.
7789 * 2. Since CDU page-size is not a single 4KB page (which is the case
7790 * for the other ILT clients), to be efficient we want to support
7791 * allocation of sub-page-size in the last entry.
7792 * 3. Context pointers are used by the driver to pass to FW / update
7793 * the context (for the other ILT clients the pointers are used just to
7794 * free the memory during unload).
7796 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7798 for (i = 0, allocated = 0; allocated < context_size; i++) {
7799 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7800 (context_size - allocated));
7801 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7802 &bp->context[i].cxt_mapping,
7803 bp->context[i].size);
7804 allocated += bp->context[i].size;
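/* Illustration (not part of the driver): each iteration above takes
 * min(CDU_ILT_PAGE_SZ, remaining bytes), so only the final chunk may be
 * short; the loop therefore runs ceil(context_size / CDU_ILT_PAGE_SZ)
 * times (see point 2 in the comment above).
 */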
7806 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7808 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
7811 /* Slow path ring */
7812 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7815 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
7816 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7820 /* needs to be done at the end, since it's self-adjusting to the amount
7821 * of memory available for RSS queues
7823 if (bnx2x_alloc_fp_mem(bp))
7829 BNX2X_ERR("Can't allocate memory\n");
7834 * Init service functions
7837 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
7838 struct bnx2x_vlan_mac_obj *obj, bool set,
7839 int mac_type, unsigned long *ramrod_flags)
7842 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
7844 memset(&ramrod_param, 0, sizeof(ramrod_param));
7846 /* Fill general parameters */
7847 ramrod_param.vlan_mac_obj = obj;
7848 ramrod_param.ramrod_flags = *ramrod_flags;
7850 /* Fill a user request section if needed */
7851 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
7852 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
7854 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
7856 /* Set the command: ADD or DEL */
7858 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
7860 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
7863 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
7865 if (rc == -EEXIST) {
7866 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
7867 /* do not treat adding the same MAC as an error */
7870 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
7875 int bnx2x_del_all_macs(struct bnx2x *bp,
7876 struct bnx2x_vlan_mac_obj *mac_obj,
7877 int mac_type, bool wait_for_comp)
7880 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
7882 /* Wait for completion of the requested commands */
7884 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7886 /* Set the mac type of addresses we want to clear */
7887 __set_bit(mac_type, &vlan_mac_flags);
7889 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
7891 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
7896 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7898 unsigned long ramrod_flags = 0;
7900 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7901 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7902 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7903 "Ignoring Zero MAC for STORAGE SD mode\n");
7907 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7909 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7910 /* Eth MAC is set on RSS leading client (fp[0]) */
7911 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
7912 set, BNX2X_ETH_MAC, &ramrod_flags);
7915 int bnx2x_setup_leading(struct bnx2x *bp)
7917 return bnx2x_setup_queue(bp, &bp->fp[0], 1);
7921 * bnx2x_set_int_mode - configure interrupt mode
7923 * @bp: driver handle
7925 * In case of MSI-X it will also try to enable MSI-X.
7927 int bnx2x_set_int_mode(struct bnx2x *bp)
7931 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
7935 case BNX2X_INT_MODE_MSIX:
7936 /* attempt to enable msix */
7937 rc = bnx2x_enable_msix(bp);
7943 /* vfs use only msix */
7944 if (rc && IS_VF(bp))
7947 /* failed to enable multiple MSI-X */
7948 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7950 1 + bp->num_cnic_queues);
7952 /* falling through... */
7953 case BNX2X_INT_MODE_MSI:
7954 bnx2x_enable_msi(bp);
7956 /* falling through... */
7957 case BNX2X_INT_MODE_INTX:
7958 bp->num_ethernet_queues = 1;
7959 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
7960 BNX2X_DEV_INFO("set number of queues to 1\n");
7963 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
7969 /* must be called prior to any HW initializations */
7970 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
7973 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
7974 return L2_ILT_LINES(bp);
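/*
 * Illustration (not part of the driver): a CID range maps to ILT lines by
 * a ceiling division over CIDs-per-page, which is effectively what
 * L2_ILT_LINES() computes. A minimal sketch (helper name hypothetical;
 * e.g. with a hypothetical ILT_PAGE_CIDS of 1024, 2048 CIDs occupy 2
 * lines):
 */
static inline u16 bnx2x_cids_to_ilt_lines(u32 num_cids)
{
	return DIV_ROUND_UP(num_cids, ILT_PAGE_CIDS);
}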
7977 void bnx2x_ilt_set_info(struct bnx2x *bp)
7979 struct ilt_client_info *ilt_client;
7980 struct bnx2x_ilt *ilt = BP_ILT(bp);
7983 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
7984 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
7987 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
7988 ilt_client->client_num = ILT_CLIENT_CDU;
7989 ilt_client->page_size = CDU_ILT_PAGE_SZ;
7990 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
7991 ilt_client->start = line;
7992 line += bnx2x_cid_ilt_lines(bp);
7994 if (CNIC_SUPPORT(bp))
7995 line += CNIC_ILT_LINES;
7996 ilt_client->end = line - 1;
7998 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8001 ilt_client->page_size,
8003 ilog2(ilt_client->page_size >> 12));
8006 if (QM_INIT(bp->qm_cid_count)) {
8007 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8008 ilt_client->client_num = ILT_CLIENT_QM;
8009 ilt_client->page_size = QM_ILT_PAGE_SZ;
8010 ilt_client->flags = 0;
8011 ilt_client->start = line;
8013 /* 4 bytes for each cid */
8014 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8017 ilt_client->end = line - 1;
8020 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8023 ilt_client->page_size,
8025 ilog2(ilt_client->page_size >> 12));
8029 if (CNIC_SUPPORT(bp)) {
8031 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8032 ilt_client->client_num = ILT_CLIENT_SRC;
8033 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8034 ilt_client->flags = 0;
8035 ilt_client->start = line;
8036 line += SRC_ILT_LINES;
8037 ilt_client->end = line - 1;
8040 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8043 ilt_client->page_size,
8045 ilog2(ilt_client->page_size >> 12));
8048 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8049 ilt_client->client_num = ILT_CLIENT_TM;
8050 ilt_client->page_size = TM_ILT_PAGE_SZ;
8051 ilt_client->flags = 0;
8052 ilt_client->start = line;
8053 line += TM_ILT_LINES;
8054 ilt_client->end = line - 1;
8057 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8060 ilt_client->page_size,
8062 ilog2(ilt_client->page_size >> 12));
8065 BUG_ON(line > ILT_MAX_LINES);
8069 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8071 * @bp: driver handle
8072 * @fp: pointer to fastpath
8073 * @init_params: pointer to parameters structure
8075 * parameters configured:
8076 * - HC configuration
8077 * - Queue's CDU context
8079 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8080 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8084 int cxt_index, cxt_offset;
8086 /* FCoE Queue uses Default SB, thus has no HC capabilities */
8087 if (!IS_FCOE_FP(fp)) {
8088 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8089 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8091 /* If HC is supported, enable host coalescing in the transition
8094 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8095 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8098 init_params->rx.hc_rate = bp->rx_ticks ?
8099 (1000000 / bp->rx_ticks) : 0;
8100 init_params->tx.hc_rate = bp->tx_ticks ?
8101 (1000000 / bp->tx_ticks) : 0;
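/* Illustration (not part of the driver): hc_rate converts the usec
 * coalescing period into an events-per-second rate; e.g. a hypothetical
 * rx_ticks of 50 usec gives 1000000 / 50 = 20000 updates/sec, and 0
 * ticks disables the rate.
 */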
8104 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8108 * CQ index among the SB indices: FCoE clients use the default
8109 * SB, therefore it's different.
8111 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8112 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8115 /* set maximum number of COSs supported by this queue */
8116 init_params->max_cos = fp->max_cos;
8118 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8119 fp->index, init_params->max_cos);
8121 /* set the context pointers queue object */
8122 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8123 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8124 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8126 init_params->cxts[cos] =
8127 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8131 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8132 struct bnx2x_queue_state_params *q_params,
8133 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8134 int tx_index, bool leading)
8136 memset(tx_only_params, 0, sizeof(*tx_only_params));
8138 /* Set the command */
8139 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8141 /* Set tx-only QUEUE flags: don't zero statistics */
8142 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8144 /* choose the index of the cid to send the slow path on */
8145 tx_only_params->cid_index = tx_index;
8147 /* Set general TX_ONLY_SETUP parameters */
8148 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8150 /* Set Tx TX_ONLY_SETUP parameters */
8151 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8154 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8155 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8156 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8157 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8159 /* send the ramrod */
8160 return bnx2x_queue_state_change(bp, q_params);
8165 * bnx2x_setup_queue - setup queue
8167 * @bp: driver handle
8168 * @fp: pointer to fastpath
8169 * @leading: is leading
8171 * This function performs 2 steps in a Queue state machine:
8172 * 1) RESET->INIT 2) INIT->SETUP
8175 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8178 struct bnx2x_queue_state_params q_params = {NULL};
8179 struct bnx2x_queue_setup_params *setup_params =
8180 &q_params.params.setup;
8181 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8182 &q_params.params.tx_only;
8186 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8188 /* reset IGU state; skip the FCoE L2 queue */
8189 if (!IS_FCOE_FP(fp))
8190 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8193 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8194 /* We want to wait for completion in this context */
8195 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8197 /* Prepare the INIT parameters */
8198 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8200 /* Set the command */
8201 q_params.cmd = BNX2X_Q_CMD_INIT;
8203 /* Change the state to INIT */
8204 rc = bnx2x_queue_state_change(bp, &q_params);
8206 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8210 DP(NETIF_MSG_IFUP, "init complete\n");
8213 /* Now move the Queue to the SETUP state... */
8214 memset(setup_params, 0, sizeof(*setup_params));
8216 /* Set QUEUE flags */
8217 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8219 /* Set general SETUP parameters */
8220 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8221 FIRST_TX_COS_INDEX);
8223 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8224 &setup_params->rxq_params);
8226 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8227 FIRST_TX_COS_INDEX);
8229 /* Set the command */
8230 q_params.cmd = BNX2X_Q_CMD_SETUP;
8233 bp->fcoe_init = true;
8235 /* Change the state to SETUP */
8236 rc = bnx2x_queue_state_change(bp, &q_params);
8238 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8242 /* loop through the relevant tx-only indices */
8243 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8244 tx_index < fp->max_cos;
8247 /* prepare and send tx-only ramrod */
8248 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8249 tx_only_params, tx_index, leading);
8251 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8252 fp->index, tx_index);
8260 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8262 struct bnx2x_fastpath *fp = &bp->fp[index];
8263 struct bnx2x_fp_txdata *txdata;
8264 struct bnx2x_queue_state_params q_params = {NULL};
8267 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8269 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8270 /* We want to wait for completion in this context */
8271 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8274 /* close tx-only connections */
8275 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8276 tx_index < fp->max_cos;
8280 /* ascertain this is a normal queue */
8280 txdata = fp->txdata_ptr[tx_index];
8282 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8285 /* send halt terminate on tx-only connection */
8286 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8287 memset(&q_params.params.terminate, 0,
8288 sizeof(q_params.params.terminate));
8289 q_params.params.terminate.cid_index = tx_index;
8291 rc = bnx2x_queue_state_change(bp, &q_params);
8295 /* send cfc del ramrod on the tx-only connection */
8296 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8297 memset(&q_params.params.cfc_del, 0,
8298 sizeof(q_params.params.cfc_del));
8299 q_params.params.cfc_del.cid_index = tx_index;
8300 rc = bnx2x_queue_state_change(bp, &q_params);
8304 /* Stop the primary connection: */
8305 /* ...halt the connection */
8306 q_params.cmd = BNX2X_Q_CMD_HALT;
8307 rc = bnx2x_queue_state_change(bp, &q_params);
8311 /* ...terminate the connection */
8312 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8313 memset(&q_params.params.terminate, 0,
8314 sizeof(q_params.params.terminate));
8315 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8316 rc = bnx2x_queue_state_change(bp, &q_params);
8319 /* ...delete cfc entry */
8320 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8321 memset(&q_params.params.cfc_del, 0,
8322 sizeof(q_params.params.cfc_del));
8323 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8324 return bnx2x_queue_state_change(bp, &q_params);
8328 static void bnx2x_reset_func(struct bnx2x *bp)
8330 int port = BP_PORT(bp);
8331 int func = BP_FUNC(bp);
8334 /* Disable the function in the FW */
8335 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8336 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8337 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8338 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8341 for_each_eth_queue(bp, i) {
8342 struct bnx2x_fastpath *fp = &bp->fp[i];
8343 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8344 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8348 if (CNIC_LOADED(bp))
8350 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8351 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8352 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8355 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8356 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8359 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8360 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8364 if (bp->common.int_block == INT_BLOCK_HC) {
8365 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8366 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8368 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8369 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8372 if (CNIC_LOADED(bp)) {
8373 /* Disable Timer scan */
8374 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8376 * Wait for at least 10ms and up to 2 seconds for the timers
8379 for (i = 0; i < 200; i++) {
8381 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8386 bnx2x_clear_func_ilt(bp, func);
8388 /* Timers workaround bug for E2: if this is vnic-3,
8389 * we need to set the entire ILT range for the timers.
8391 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8392 struct ilt_client_info ilt_cli;
8393 /* use dummy TM client */
8394 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8396 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8397 ilt_cli.client_num = ILT_CLIENT_TM;
8399 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8402 /* this assumes that reset_port() was called before reset_func() */
8403 if (!CHIP_IS_E1x(bp))
8404 bnx2x_pf_disable(bp);
8409 static void bnx2x_reset_port(struct bnx2x *bp)
8411 int port = BP_PORT(bp);
8414 /* Reset physical Link */
8415 bnx2x__link_reset(bp);
8417 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8419 /* Do not rcv packets to BRB */
8420 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8421 /* Do not direct rcv packets that are not for MCP to the BRB */
8422 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8423 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8426 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8429 /* Check for BRB port occupancy */
8430 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8432 DP(NETIF_MSG_IFDOWN,
8433 "BRB1 is not empty %d blocks are occupied\n", val);
8435 /* TODO: Close Doorbell port? */
8438 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8440 struct bnx2x_func_state_params func_params = {NULL};
8442 /* Prepare parameters for function state transitions */
8443 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8445 func_params.f_obj = &bp->func_obj;
8446 func_params.cmd = BNX2X_F_CMD_HW_RESET;
8448 func_params.params.hw_init.load_phase = load_code;
8450 return bnx2x_func_state_change(bp, &func_params);
8453 static int bnx2x_func_stop(struct bnx2x *bp)
8455 struct bnx2x_func_state_params func_params = {NULL};
8458 /* Prepare parameters for function state transitions */
8459 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8460 func_params.f_obj = &bp->func_obj;
8461 func_params.cmd = BNX2X_F_CMD_STOP;
8464 * Try to stop the function the 'good way'. If it fails (in case
8465 * of a parity error during bnx2x_chip_cleanup()) and we are
8466 * not in a debug mode, perform a state transaction in order to
8467 * enable further HW_RESET transactions.
8469 rc = bnx2x_func_state_change(bp, &func_params);
8471 #ifdef BNX2X_STOP_ON_ERROR
8474 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8475 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8476 return bnx2x_func_state_change(bp, &func_params);
8484 * bnx2x_send_unload_req - request unload mode from the MCP.
8486 * @bp: driver handle
8487 * @unload_mode: requested function's unload mode
8489 * Returns the unload mode reported by the MCP: COMMON, PORT or FUNC.
8491 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8494 int port = BP_PORT(bp);
8496 /* Select the UNLOAD request mode */
8497 if (unload_mode == UNLOAD_NORMAL)
8498 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8500 else if (bp->flags & NO_WOL_FLAG)
8501 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8504 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8505 u8 *mac_addr = bp->dev->dev_addr;
8509 /* The mac address is written to entries 1-4 to
8510 * preserve entry 0 which is used by the PMF
8512 u8 entry = (BP_VN(bp) + 1)*8;
8514 val = (mac_addr[0] << 8) | mac_addr[1];
8515 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8517 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8518 (mac_addr[4] << 8) | mac_addr[5];
8519 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
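/* Sketch of the MAC packing done by the two EMAC_WR() calls above
 * (illustrative only, kept out of the build; the helper name is
 * hypothetical): a 6-byte MAC splits into a 16-bit high word and a
 * 32-bit low word of the 64-bit MAC_MATCH CAM entry.
 */
#if 0
static void example_pack_mac_match(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];			/* bytes 0-1 */
	*lo = (mac[2] << 24) | (mac[3] << 16) |
	      (mac[4] << 8) | mac[5];			/* bytes 2-5 */
}
#endif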
8521 /* Enable the PME and clear the status */
8522 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
8523 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8524 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
8526 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8529 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8531 /* Send the request to the MCP */
8533 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8535 int path = BP_PATH(bp);
8537 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
8538 path, load_count[path][0], load_count[path][1],
8539 load_count[path][2]);
8540 load_count[path][0]--;
8541 load_count[path][1 + port]--;
8542 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
8543 path, load_count[path][0], load_count[path][1],
8544 load_count[path][2]);
8545 if (load_count[path][0] == 0)
8546 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8547 else if (load_count[path][1 + port] == 0)
8548 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8550 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
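/* Sketch of the NO-MCP unload decision above (illustrative only, kept out
 * of the build; the helper name is hypothetical). Counter [0] covers the
 * whole path and counter [1 + port] a single port, so the last function on
 * the path triggers COMMON and the last one on a port triggers PORT.
 */
#if 0
static u32 example_no_mcp_unload_code(const u32 *counts, int port)
{
	if (counts[0] == 0)
		return FW_MSG_CODE_DRV_UNLOAD_COMMON;
	if (counts[1 + port] == 0)
		return FW_MSG_CODE_DRV_UNLOAD_PORT;
	return FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
}
#endif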
8557 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8559 * @bp: driver handle
8560 * @keep_link: true iff link should be kept up
8562 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8564 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8566 /* Report UNLOAD_DONE to MCP */
8568 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8571 static int bnx2x_func_wait_started(struct bnx2x *bp)
8574 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8580 * (assumption: no attention from the MCP at this stage)
8581 * The PMF is probably in the middle of a TXdisable/enable transaction:
8582 * 1. Sync the ISR for the default SB
8583 * 2. Sync the SP queue - this guarantees us that attention handling started
8584 * 3. Wait for the TXdisable/enable transaction to complete
8586 * 1+2 guarantee that if the DCBx attention was scheduled, it has already
8587 * changed the pending bit of the transaction from STARTED-->TX_STOPPED; if we
8588 * already received completion for the transaction, the state is TX_STOPPED.
8589 * The state will return to STARTED after completion of TX_STOPPED-->STARTED.
8593 /* make sure default SB ISR is done */
8595 synchronize_irq(bp->msix_table[0].vector);
8597 synchronize_irq(bp->pdev->irq);
8599 flush_workqueue(bnx2x_wq);
8601 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8602 BNX2X_F_STATE_STARTED && tout--)
8605 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8606 BNX2X_F_STATE_STARTED) {
8607 #ifdef BNX2X_STOP_ON_ERROR
8608 BNX2X_ERR("Wrong function state\n");
8612 * Failed to complete the transaction the 'good way'.
8613 * Force both transactions with the CLR bit.
8615 struct bnx2x_func_state_params func_params = {NULL};
8617 DP(NETIF_MSG_IFDOWN,
8618 "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
8620 func_params.f_obj = &bp->func_obj;
8621 __set_bit(RAMROD_DRV_CLR_ONLY,
8622 &func_params.ramrod_flags);
8624 /* STARTED-->TX_STOPPED */
8625 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8626 bnx2x_func_state_change(bp, &func_params);
8628 /* TX_STOPPED-->STARTED */
8629 func_params.cmd = BNX2X_F_CMD_TX_START;
8630 return bnx2x_func_state_change(bp, &func_params);
8637 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8639 int port = BP_PORT(bp);
8642 struct bnx2x_mcast_ramrod_params rparam = {NULL};
8645 /* Wait until tx fastpath tasks complete */
8646 for_each_tx_queue(bp, i) {
8647 struct bnx2x_fastpath *fp = &bp->fp[i];
8649 for_each_cos_in_tx_queue(fp, cos)
8650 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8651 #ifdef BNX2X_STOP_ON_ERROR
8657 /* Give HW time to discard old tx messages */
8658 usleep_range(1000, 1000);
8660 /* Clean all ETH MACs */
8661 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8664 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8666 /* Clean up UC list */
8667 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8670 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8674 if (!CHIP_IS_E1(bp))
8675 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8677 /* Set "drop all" (stop Rx).
8678 * We need to take a netif_addr_lock() here in order to prevent
8679 * a race between the completion code and this code.
8681 netif_addr_lock_bh(bp->dev);
8682 /* Schedule the rx_mode command */
8683 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8684 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8686 bnx2x_set_storm_rx_mode(bp);
8688 /* Cleanup multicast configuration */
8689 rparam.mcast_obj = &bp->mcast_obj;
8690 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8692 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8694 netif_addr_unlock_bh(bp->dev);
8699 * Send the UNLOAD_REQUEST to the MCP. This will return if
8700 * this function should perform FUNC, PORT or COMMON HW
8703 reset_code = bnx2x_send_unload_req(bp, unload_mode);
8706 * (assumption: no attention from the MCP at this stage)
8707 * The PMF is probably in the middle of a TXdisable/enable transaction
8709 rc = bnx2x_func_wait_started(bp);
8711 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8712 #ifdef BNX2X_STOP_ON_ERROR
8717 /* Close multi and leading connections.
8718 * Completions for ramrods are collected in a synchronous way.
8720 for_each_eth_queue(bp, i)
8721 if (bnx2x_stop_queue(bp, i))
8722 #ifdef BNX2X_STOP_ON_ERROR
8728 if (CNIC_LOADED(bp)) {
8729 for_each_cnic_queue(bp, i)
8730 if (bnx2x_stop_queue(bp, i))
8731 #ifdef BNX2X_STOP_ON_ERROR
8738 /* If SP settings didn't get completed so far - something
8739 * very wrong has happened.
8741 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8742 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
8744 #ifndef BNX2X_STOP_ON_ERROR
8747 rc = bnx2x_func_stop(bp);
8749 BNX2X_ERR("Function stop failed!\n");
8750 #ifdef BNX2X_STOP_ON_ERROR
8755 /* Disable HW interrupts, NAPI */
8756 bnx2x_netif_stop(bp, 1);
8757 /* Delete all NAPI objects */
8758 bnx2x_del_all_napi(bp);
8759 if (CNIC_LOADED(bp))
8760 bnx2x_del_all_napi_cnic(bp);
8765 /* Reset the chip */
8766 rc = bnx2x_reset_hw(bp, reset_code);
8768 BNX2X_ERR("HW_RESET failed\n");
8771 /* Report UNLOAD_DONE to MCP */
8772 bnx2x_send_unload_done(bp, keep_link);
8775 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8779 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
8781 if (CHIP_IS_E1(bp)) {
8782 int port = BP_PORT(bp);
8783 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8784 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8786 val = REG_RD(bp, addr);
8788 REG_WR(bp, addr, val);
8790 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8791 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8792 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8793 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8797 /* Close gates #2, #3 and #4: */
8798 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8802 /* Gates #2 and #4a are closed/opened for "not E1" only */
8803 if (!CHIP_IS_E1(bp)) {
8805 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
8807 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
8811 if (CHIP_IS_E1x(bp)) {
8812 /* Prevent interrupts from HC on both ports */
8813 val = REG_RD(bp, HC_REG_CONFIG_1);
8814 REG_WR(bp, HC_REG_CONFIG_1,
8815 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
8816 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
8818 val = REG_RD(bp, HC_REG_CONFIG_0);
8819 REG_WR(bp, HC_REG_CONFIG_0,
8820 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
8821 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
8823 /* Prevent incoming interrupts in IGU */
8824 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8826 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
8828 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
8829 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
8832 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
8833 close ? "closing" : "opening");
8837 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8839 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8841 /* Do some magic... */
8842 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8843 *magic_val = val & SHARED_MF_CLP_MAGIC;
8844 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8848 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
8850 * @bp: driver handle
8851 * @magic_val: old value of the `magic' bit.
8853 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8855 /* Restore the `magic' bit value... */
8856 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8857 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8858 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8862 * bnx2x_reset_mcp_prep - prepare for MCP reset.
8864 * @bp: driver handle
8865 * @magic_val: old value of 'magic' bit.
8867 * Takes care of CLP configurations.
8869 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8872 u32 validity_offset;
8874 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
8876 /* Set `magic' bit in order to save MF config */
8877 if (!CHIP_IS_E1(bp))
8878 bnx2x_clp_reset_prep(bp, magic_val);
8880 /* Get shmem offset */
8881 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8883 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
8885 /* Clear validity map flags */
8887 REG_WR(bp, shmem + validity_offset, 0);
8890 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8891 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
8894 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
8896 * @bp: driver handle
8898 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
8900 /* special handling for emulation and FPGA:
8901 * wait 10 times longer */
8902 if (CHIP_REV_IS_SLOW(bp))
8903 msleep(MCP_ONE_TIMEOUT*10);
8905 msleep(MCP_ONE_TIMEOUT);
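/* The wait above is one step of the driver's bounded-poll idiom: re-check
 * a condition every MCP_ONE_TIMEOUT ms and give up after MCP_TIMEOUT ms in
 * total, as bnx2x_init_shmem() below does. A generic sketch (illustrative
 * only, kept out of the build; the helper name is hypothetical):
 */
#if 0
static int example_poll_mcp(struct bnx2x *bp, bool (*done)(struct bnx2x *))
{
	int cnt = 0;

	do {
		if (done(bp))
			return 0;
		bnx2x_mcp_wait_one(bp);	/* one MCP_ONE_TIMEOUT step */
	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));

	return -EBUSY;			/* condition never came true */
}
#endif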
8909 * initializes bp->common.shmem_base and waits for validity signature to appear
8911 static int bnx2x_init_shmem(struct bnx2x *bp)
8917 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8918 if (bp->common.shmem_base) {
8919 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8920 if (val & SHR_MEM_VALIDITY_MB)
8924 bnx2x_mcp_wait_one(bp);
8926 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
8928 BNX2X_ERR("BAD MCP validity signature\n");
8933 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8935 int rc = bnx2x_init_shmem(bp);
8937 /* Restore the `magic' bit value */
8938 if (!CHIP_IS_E1(bp))
8939 bnx2x_clp_reset_done(bp, magic_val);
8944 static void bnx2x_pxp_prep(struct bnx2x *bp)
8946 if (!CHIP_IS_E1(bp)) {
8947 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8948 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8954 * Reset the whole chip except for:
8956 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8959 * - MISC (including AEU)
8963 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
8965 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8966 u32 global_bits2, stay_reset2;
8969 * Bits that have to be set in reset_mask2 if we want to reset 'global'
8970 * (per chip) blocks.
8973 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
8974 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
8976 /* Don't reset the following blocks.
8977 * Important: per-port blocks (such as EMAC, BMAC, UMAC) can't be
8978 * reset, as in a 4-port device they might still be owned
8979 * by the MCP (there is only one leader per path).
8982 MISC_REGISTERS_RESET_REG_1_RST_HC |
8983 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8984 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8987 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
8988 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8989 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8990 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8991 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8992 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8993 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8994 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
8995 MISC_REGISTERS_RESET_REG_2_RST_ATC |
8996 MISC_REGISTERS_RESET_REG_2_PGLC |
8997 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8998 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8999 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9000 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9001 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9002 MISC_REGISTERS_RESET_REG_2_UMAC1;
9005 * Keep the following blocks in reset:
9006 * - all xxMACs are handled by the bnx2x_link code.
9009 MISC_REGISTERS_RESET_REG_2_XMAC |
9010 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9012 /* Full reset masks according to the chip */
9013 reset_mask1 = 0xffffffff;
9016 reset_mask2 = 0xffff;
9017 else if (CHIP_IS_E1H(bp))
9018 reset_mask2 = 0x1ffff;
9019 else if (CHIP_IS_E2(bp))
9020 reset_mask2 = 0xfffff;
9021 else /* CHIP_IS_E3 */
9022 reset_mask2 = 0x3ffffff;
9024 /* Don't reset global blocks unless we need to */
9026 reset_mask2 &= ~global_bits2;
9029 * In case of attention in the QM, we need to reset PXP
9030 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9031 * because otherwise QM reset would release 'close the gates' shortly
9032 * before resetting the PXP, then the PSWRQ would send a write
9033 * request to PGLUE. Then when PXP is reset, PGLUE would try to
9034 * read the payload data from PSWWR, but PSWWR would not
9035 * respond. The write queue in PGLUE would get stuck, and DMAE commands
9036 * would not return. Therefore it's important to reset the second
9037 * reset register (containing the
9038 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9039 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9043 reset_mask2 & (~not_reset_mask2));
9045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9046 reset_mask1 & (~not_reset_mask1));
9051 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9052 reset_mask2 & (~stay_reset2));
9057 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9062 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
9065 * @bp: driver handle
9067 * It should get cleared in no more than 1s. Returns 0 if the
9068 * pending writes bit gets cleared.
9070 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9076 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9081 usleep_range(1000, 1000);
9082 } while (cnt-- > 0);
9085 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9093 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9097 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9101 /* Empty the Tetris buffer, wait for 1s */
9103 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9104 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9105 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9106 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9107 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9109 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9111 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9112 ((port_is_idle_0 & 0x1) == 0x1) &&
9113 ((port_is_idle_1 & 0x1) == 0x1) &&
9114 (pgl_exp_rom2 == 0xffffffff) &&
9115 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9117 usleep_range(1000, 1000);
9118 } while (cnt-- > 0);
9121 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9122 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9123 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9130 /* Close gates #2, #3 and #4 */
9131 bnx2x_set_234_gates(bp, true);
9133 /* Poll for IGU VQs for 57712 and newer chips */
9134 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9138 /* TBD: Indicate that "process kill" is in progress to MCP */
9140 /* Clear "unprepared" bit */
9141 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9144 /* Make sure all is written to the chip before the reset */
9147 /* Wait for 1ms to empty GLUE and PCI-E core queues,
9148 * PSWHST, GRC and PSWRD Tetris buffer.
9150 usleep_range(1000, 1000);
9152 /* Prepare for chip reset: */
9155 bnx2x_reset_mcp_prep(bp, &val);
9161 /* reset the chip */
9162 bnx2x_process_kill_chip_reset(bp, global);
9165 /* Recover after reset: */
9167 if (global && bnx2x_reset_mcp_comp(bp, val))
9170 /* TBD: Add resetting the NO_MCP mode DB here */
9172 /* Open the gates #2, #3 and #4 */
9173 bnx2x_set_234_gates(bp, false);
9175 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
9176 * reset state, re-enable attentions. */
9181 static int bnx2x_leader_reset(struct bnx2x *bp)
9184 bool global = bnx2x_reset_is_global(bp);
9187 /* If we are not going to reset the MCP, load a "fake" driver
9188 * to reset the HW while this driver is still the owner of the HW.
9190 if (!global && !BP_NOMCP(bp)) {
9191 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9192 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9194 BNX2X_ERR("MCP response failure, aborting\n");
9196 goto exit_leader_reset;
9198 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9199 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9200 BNX2X_ERR("MCP unexpected resp, aborting\n");
9202 goto exit_leader_reset2;
9204 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9206 BNX2X_ERR("MCP response failure, aborting\n");
9208 goto exit_leader_reset2;
9212 /* Try to recover after the failure */
9213 if (bnx2x_process_kill(bp, global)) {
9214 BNX2X_ERR("Something bad has happened on engine %d!\n",
9217 goto exit_leader_reset2;
9221 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
9224 bnx2x_set_reset_done(bp);
9226 bnx2x_clear_reset_global(bp);
9229 /* unload "fake driver" if it was loaded */
9230 if (!global && !BP_NOMCP(bp)) {
9231 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9232 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9236 bnx2x_release_leader_lock(bp);
9241 static void bnx2x_recovery_failed(struct bnx2x *bp)
9243 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9245 /* Disconnect this device */
9246 netif_device_detach(bp->dev);
9249 * Block ifup for all functions on this engine until "process kill"
9252 bnx2x_set_reset_in_progress(bp);
9254 /* Shut down the power */
9255 bnx2x_set_power_state(bp, PCI_D3hot);
9257 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9263 * Assumption: runs under rtnl lock. This together with the fact
9264 * that it's called only from bnx2x_sp_rtnl() ensures that it
9265 * will never be called when netif_running(bp->dev) is false.
9267 static void bnx2x_parity_recover(struct bnx2x *bp)
9269 bool global = false;
9270 u32 error_recovered, error_unrecovered;
9273 DP(NETIF_MSG_HW, "Handling parity\n");
9275 switch (bp->recovery_state) {
9276 case BNX2X_RECOVERY_INIT:
9277 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9278 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9279 WARN_ON(!is_parity);
9281 /* Try to get a LEADER_LOCK HW lock */
9282 if (bnx2x_trylock_leader_lock(bp)) {
9283 bnx2x_set_reset_in_progress(bp);
9285 * Check if there is a global attention; if
9286 * there was one, set the global reset bit.
9291 bnx2x_set_reset_global(bp);
9296 /* Stop the driver */
9297 /* If interface has been removed - break */
9298 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9301 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9303 /* Ensure "is_leader", the MCP command sequence and the
9304 * "recovery_state" update values are seen by other CPUs.
9310 case BNX2X_RECOVERY_WAIT:
9311 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9312 if (bp->is_leader) {
9313 int other_engine = BP_PATH(bp) ? 0 : 1;
9314 bool other_load_status =
9315 bnx2x_get_load_status(bp, other_engine);
9317 bnx2x_get_load_status(bp, BP_PATH(bp));
9318 global = bnx2x_reset_is_global(bp);
9321 * In case of a parity in a global block, let
9322 * the first leader that performs a
9323 * leader_reset() reset the global blocks in
9324 * order to clear global attentions. Otherwise
9325 * the gates will remain closed for that
9329 (global && other_load_status)) {
9330 /* Wait until all other functions get down */
9333 schedule_delayed_work(&bp->sp_rtnl_task,
9337 /* If all other functions got down -
9338 * try to bring the chip back to
9339 * normal. In any case it's an exit
9340 * point for a leader.
9342 if (bnx2x_leader_reset(bp)) {
9343 bnx2x_recovery_failed(bp);
9347 /* If we are here, it means that the
9348 * leader has succeeded and doesn't
9349 * want to be a leader any more. Try
9350 * to continue as a non-leader.
9354 } else { /* non-leader */
9355 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9356 /* Try to get a LEADER_LOCK HW lock as
9357 * long as a former leader may have
9358 * been unloaded by the user or
9359 * released leadership for another reason.
9362 if (bnx2x_trylock_leader_lock(bp)) {
9363 /* I'm a leader now! Restart a
9370 schedule_delayed_work(&bp->sp_rtnl_task,
9376 * If there was a global attention, wait
9377 * for it to be cleared.
9379 if (bnx2x_reset_is_global(bp)) {
9380 schedule_delayed_work(
9387 bp->eth_stats.recoverable_error;
9389 bp->eth_stats.unrecoverable_error;
9390 bp->recovery_state =
9391 BNX2X_RECOVERY_NIC_LOADING;
9392 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9393 error_unrecovered++;
9395 "Recovery failed. Power cycle needed\n");
9396 /* Disconnect this device */
9397 netif_device_detach(bp->dev);
9398 /* Shut down the power */
9399 bnx2x_set_power_state(
9403 bp->recovery_state =
9404 BNX2X_RECOVERY_DONE;
9408 bp->eth_stats.recoverable_error =
9410 bp->eth_stats.unrecoverable_error =
9422 static int bnx2x_close(struct net_device *dev);
9424 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
9425 * scheduled on a general queue in order to prevent a deadlock.
9427 static void bnx2x_sp_rtnl_task(struct work_struct *work)
9429 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
9433 if (!netif_running(bp->dev))
9436 /* if stop on error is defined no recovery flows should be executed */
9437 #ifdef BNX2X_STOP_ON_ERROR
9438 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined, so reset not done to allow debug dump.\n"
9439 "You will need to reboot when done\n");
9440 goto sp_rtnl_not_reset;
9443 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9445 * Clear all pending SP commands as we are going to reset the
9446 * function anyway.
9447 */
9448 bp->sp_rtnl_state = 0;
9451 bnx2x_parity_recover(bp);
9456 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9458 * Clear all pending SP commands as we are going to reset the
9459 * function anyway.
9460 */
9461 bp->sp_rtnl_state = 0;
9464 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9465 bnx2x_nic_load(bp, LOAD_NORMAL);
9469 #ifdef BNX2X_STOP_ON_ERROR
9472 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9473 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
9474 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9475 bnx2x_after_function_update(bp);
9477 * in case of a fan failure we need to shut the device down even if
9478 * the "stop on error" debug flag is set, since we are trying to
9479 * prevent permanent overheating damage
9480 */
9481 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
9482 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
9483 netif_device_detach(bp->dev);
9484 bnx2x_close(bp->dev);
9487 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
9489 "sending set mcast vf pf channel message from rtnl sp-task\n");
9490 bnx2x_vfpf_set_mcast(bp->dev);
9493 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
9494 &bp->sp_rtnl_state)) {
9496 "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
9497 bnx2x_vfpf_storm_rx_mode(bp);
9504 /* end of nic load/unload */
9506 static void bnx2x_period_task(struct work_struct *work)
9508 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9510 if (!netif_running(bp->dev))
9511 goto period_task_exit;
9513 if (CHIP_REV_IS_SLOW(bp)) {
9514 BNX2X_ERR("period task called on emulation, ignoring\n");
9515 goto period_task_exit;
9518 bnx2x_acquire_phy_lock(bp);
9520 * The barrier is needed to ensure the ordering between the writing to
9521 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
9522 * the reading of it here.
9523 */
9526 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9528 /* Re-queue task in 1 sec */
9529 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9532 bnx2x_release_phy_lock(bp);
9538 * Init service functions
9541 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
9543 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9544 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9545 return base + (BP_ABS_FUNC(bp)) * stride;
9548 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
9550 u32 reg = bnx2x_get_pretend_reg(bp);
9552 /* Flush all outstanding writes */
9555 /* Pretend to be function 0 */
9557 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
9559 /* From now we are in the "like-E1" mode */
9560 bnx2x_int_disable(bp);
9562 /* Flush all outstanding writes */
9565 /* Restore the original function */
9566 REG_WR(bp, reg, BP_ABS_FUNC(bp));
9570 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
9573 bnx2x_int_disable(bp);
9575 bnx2x_undi_int_disable_e1h(bp);
9578 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp)
9580 u32 val, base_addr, offset, mask, reset_reg;
9581 bool mac_stopped = false;
9582 u8 port = BP_PORT(bp);
9584 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
9586 if (!CHIP_IS_E3(bp)) {
9587 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9588 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9589 if ((mask & reset_reg) && val) {
9591 BNX2X_DEV_INFO("Disable bmac Rx\n");
9592 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9593 : NIG_REG_INGRESS_BMAC0_MEM;
9594 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9595 : BIGMAC_REGISTER_BMAC_CONTROL;
9598 * use rd/wr since we cannot use dmae. This is safe
9599 * since MCP won't access the bus due to the request
9600 * to unload, and no function on the path can be
9601 * loaded at this time.
9603 wb_data[0] = REG_RD(bp, base_addr + offset);
9604 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
9605 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9606 REG_WR(bp, base_addr + offset, wb_data[0]);
9607 REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
9610 BNX2X_DEV_INFO("Disable emac Rx\n");
9611 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
9615 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9616 BNX2X_DEV_INFO("Disable xmac Rx\n");
9617 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9618 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9619 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9621 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9623 REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
9626 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9627 if (mask & reset_reg) {
9628 BNX2X_DEV_INFO("Disable umac Rx\n");
9629 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
9630 REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
9640 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9641 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9642 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9643 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
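/* Round-trip sketch for the producer macros above (illustrative only, kept
 * out of the build; the helper name is hypothetical): one 32-bit TSTORM
 * word carries the RCQ producer in bits 0-15 and the BD producer in bits
 * 16-31, which is exactly what bnx2x_prev_unload_undi_inc() below bumps.
 */
#if 0
static u32 example_bump_undi_prods(u32 prod_word, u8 inc)
{
	u16 rcq = BNX2X_PREV_UNDI_RCQ(prod_word) + inc;	/* low half */
	u16 bd = BNX2X_PREV_UNDI_BD(prod_word) + inc;	/* high half */

	return BNX2X_PREV_UNDI_PROD(rcq, bd);		/* repack */
}
#endif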
9645 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
9648 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9650 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9651 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9653 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9654 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9656 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9660 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
9662 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9663 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9665 BNX2X_ERR("MCP response failure, aborting\n");
9672 static struct bnx2x_prev_path_list *
9673 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9675 struct bnx2x_prev_path_list *tmp_list;
9677 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9678 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9679 bp->pdev->bus->number == tmp_list->bus &&
9680 BP_PATH(bp) == tmp_list->path)
9686 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9688 struct bnx2x_prev_path_list *tmp_list;
9691 if (down_trylock(&bnx2x_prev_sem))
9694 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
9695 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9696 bp->pdev->bus->number == tmp_list->bus &&
9697 BP_PATH(bp) == tmp_list->path) {
9699 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9705 up(&bnx2x_prev_sem);
9710 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9712 struct bnx2x_prev_path_list *tmp_list;
9715 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9717 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9721 tmp_list->bus = bp->pdev->bus->number;
9722 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9723 tmp_list->path = BP_PATH(bp);
9724 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9726 rc = down_interruptible(&bnx2x_prev_sem);
9728 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
9731 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
9733 list_add(&tmp_list->list, &bnx2x_prev_list);
9734 up(&bnx2x_prev_sem);
9740 static int bnx2x_do_flr(struct bnx2x *bp)
9744 struct pci_dev *dev = bp->pdev;
9747 if (CHIP_IS_E1x(bp)) {
9748 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9752 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
9753 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9754 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9759 /* Wait for Transaction Pending bit clean */
9760 for (i = 0; i < 4; i++) {
9762 msleep((1 << (i - 1)) * 100);
9764 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
9765 if (!(status & PCI_EXP_DEVSTA_TRPND))
9770 "transaction is not cleared; proceeding with reset anyway\n");
9774 BNX2X_DEV_INFO("Initiating FLR\n");
9775 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9780 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9784 BNX2X_DEV_INFO("Uncommon unload Flow\n");
9786 /* Test if previous unload process was already finished for this path */
9787 if (bnx2x_prev_is_path_marked(bp))
9788 return bnx2x_prev_mcp_done(bp);
9790 /* If the function has FLR capabilities and the existing FW version
9791 * matches the one required, then FLR will be sufficient to clean any
9792 * residue left by the previous driver.
9794 rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
9797 /* fw version is good */
9798 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
9799 rc = bnx2x_do_flr(bp);
9803 /* FLR was performed */
9804 BNX2X_DEV_INFO("FLR successful\n");
9808 BNX2X_DEV_INFO("Could not FLR\n");
9810 /* Close the MCP request, return failure*/
9811 rc = bnx2x_prev_mcp_done(bp);
9813 rc = BNX2X_PREV_WAIT_NEEDED;
9818 static int bnx2x_prev_unload_common(struct bnx2x *bp)
9820 u32 reset_reg, tmp_reg = 0, rc;
9821 bool prev_undi = false;
9822 /* It is possible that a previous function received the 'common' answer,
9823 * but hasn't loaded yet, therefore creating a scenario of
9824 * multiple functions receiving 'common' on the same path.
9826 BNX2X_DEV_INFO("Common unload Flow\n");
9828 if (bnx2x_prev_is_path_marked(bp))
9829 return bnx2x_prev_mcp_done(bp);
9831 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9833 /* Reset should be performed after BRB is emptied */
9834 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
9835 u32 timer_count = 1000;
9837 /* Close the MAC Rx to prevent BRB from filling up */
9838 bnx2x_prev_unload_close_mac(bp);
9840 /* Check if the UNDI driver was previously loaded
9841 * UNDI driver initializes CID offset for normal bell to 0x7
9843 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9844 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
9845 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9846 if (tmp_reg == 0x7) {
9847 BNX2X_DEV_INFO("UNDI previously loaded\n");
9849 /* clear the UNDI indication */
9850 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9853 /* wait until BRB is empty */
9854 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9855 while (timer_count) {
9856 u32 prev_brb = tmp_reg;
9858 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9862 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
9864 /* reset timer as long as BRB actually gets emptied */
9865 if (prev_brb > tmp_reg)
9870 /* If UNDI resides in memory, manually increment it */
9872 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
9878 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
9882 /* No packets are in the pipeline, path is ready for reset */
9883 bnx2x_reset_common(bp);
9885 rc = bnx2x_prev_mark_path(bp, prev_undi);
9887 bnx2x_prev_mcp_done(bp);
9891 return bnx2x_prev_mcp_done(bp);
9894 /* A previous driver's DMAE transaction may have occurred when the pre-boot
9895 * stage ended and boot began, or when the kdump kernel was loaded. Either case
9896 * would invalidate the addresses of the transaction, resulting in the was-error
9897 * bit being set in the PCI core and causing all hw-to-host PCIe transactions to
9898 * time out. If this happened, we want to clear from the PGLUE_B both the
9899 * interrupt which detected this and the was-done bit.
9900 */
9901 static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9903 if (!CHIP_IS_E1x(bp)) {
9904 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
9905 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
9906 BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
9907 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
9913 static int bnx2x_prev_unload(struct bnx2x *bp)
9915 int time_counter = 10;
9916 u32 rc, fw, hw_lock_reg, hw_lock_val;
9917 struct bnx2x_prev_path_list *prev_list;
9918 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9920 /* clear hw from errors which may have resulted from an interrupted
9921 * DMAE transaction
9922 */
9923 bnx2x_prev_interrupted_dmae(bp);
9925 /* Release previously held locks */
9926 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9927 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9928 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
9930 hw_lock_val = (REG_RD(bp, hw_lock_reg));
9932 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
9933 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
9934 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9935 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
9938 BNX2X_DEV_INFO("Release Previously held hw lock\n");
9939 REG_WR(bp, hw_lock_reg, 0xffffffff);
9941 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
9943 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
9944 BNX2X_DEV_INFO("Release previously held alr\n");
9945 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
9950 /* Lock MCP using an unload request */
9951 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9953 BNX2X_ERR("MCP response failure, aborting\n");
9958 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9959 rc = bnx2x_prev_unload_common(bp);
9963 /* non-common reply from MCP might require looping */
9964 rc = bnx2x_prev_unload_uncommon(bp);
9965 if (rc != BNX2X_PREV_WAIT_NEEDED)
9969 } while (--time_counter);
9971 if (!time_counter || rc) {
9972 BNX2X_ERR("Failed unloading previous driver, aborting\n");
9976 /* Mark function if its port was used to boot from SAN */
9977 prev_list = bnx2x_prev_path_get_entry(bp);
9978 if (prev_list && (prev_list->undi & (1 << BP_PORT(bp))))
9979 bp->link_params.feature_config_flags |=
9980 FEATURE_CONFIG_BOOT_FROM_SAN;
9982 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
9987 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
9989 u32 val, val2, val3, val4, id, boot_mode;
9992 /* Get the chip revision id and number. */
9993 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9994 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9995 id = ((val & 0xffff) << 16);
9996 val = REG_RD(bp, MISC_REG_CHIP_REV);
9997 id |= ((val & 0xf) << 12);
9998 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9999 id |= ((val & 0xff) << 4);
10000 val = REG_RD(bp, MISC_REG_BOND_ID);
10002 bp->common.chip_id = id;
10004 /* force 57811 according to MISC register */
10005 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10006 if (CHIP_IS_57810(bp))
10007 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10008 (bp->common.chip_id & 0x0000FFFF);
10009 else if (CHIP_IS_57810_MF(bp))
10010 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10011 (bp->common.chip_id & 0x0000FFFF);
10012 bp->common.chip_id |= 0x1;
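	/* Layout sketch of the chip_id assembled above (illustrative only,
	 * kept out of the build): chip num in bits 16-31, rev in 12-15,
	 * metal in 4-11, bond_id in 0-3. The field values are made up.
	 */
#if 0
	{
		u32 example_id = (0x168e << 16) |	/* chip num */
				 (0x1 << 12) |		/* revision */
				 (0x05 << 4) |		/* metal */
				 0x0;			/* bond_id */
	}
#endif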
10015 /* Set doorbell size */
10016 bp->db_size = (1 << BNX2X_DB_SHIFT);
10018 if (!CHIP_IS_E1x(bp)) {
10019 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10020 if ((val & 1) == 0)
10021 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10023 val = (val >> 1) & 1;
10024 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10026 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10029 if (CHIP_MODE_IS_4_PORT(bp))
10030 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
10032 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
10034 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10035 bp->pfid = bp->pf_num; /* 0..7 */
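	/* Sketch of the pf_num -> pfid mapping used above (illustrative
	 * only, kept out of the build): on non-E1x chips two functions
	 * share a pfid in 4-port mode, while 2-port mode uses the even
	 * numbers; on E1x the pfid is simply pf_num.
	 */
#if 0
	{
		u8 pf_num;

		for (pf_num = 0; pf_num < 8; pf_num++) {
			u8 pfid_4port = pf_num >> 1;	/* 0,0,1,1,2,2,3,3 */
			u8 pfid_2port = pf_num & 0x6;	/* 0,0,2,2,4,4,6,6 */
		}
	}
#endif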
10038 BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
10040 bp->link_params.chip_id = bp->common.chip_id;
10041 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10043 val = (REG_RD(bp, 0x2874) & 0x55);
10044 if ((bp->common.chip_id & 0x1) ||
10045 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10046 bp->flags |= ONE_PORT_FLAG;
10047 BNX2X_DEV_INFO("single port device\n");
10050 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10051 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10052 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10053 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10054 bp->common.flash_size, bp->common.flash_size);
10056 bnx2x_init_shmem(bp);
10060 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10061 MISC_REG_GENERIC_CR_1 :
10062 MISC_REG_GENERIC_CR_0));
10064 bp->link_params.shmem_base = bp->common.shmem_base;
10065 bp->link_params.shmem2_base = bp->common.shmem2_base;
10066 if (SHMEM2_RD(bp, size) >
10067 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10068 bp->link_params.lfa_base =
10069 REG_RD(bp, bp->common.shmem2_base +
10070 (u32)offsetof(struct shmem2_region,
10071 lfa_host_addr[BP_PORT(bp)]));
10073 bp->link_params.lfa_base = 0;
10074 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
10075 bp->common.shmem_base, bp->common.shmem2_base);
10077 if (!bp->common.shmem_base) {
10078 BNX2X_DEV_INFO("MCP not active\n");
10079 bp->flags |= NO_MCP_FLAG;
10083 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10084 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10086 bp->link_params.hw_led_mode = ((bp->common.hw_config &
10087 SHARED_HW_CFG_LED_MODE_MASK) >>
10088 SHARED_HW_CFG_LED_MODE_SHIFT);
10090 bp->link_params.feature_config_flags = 0;
10091 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10092 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10093 bp->link_params.feature_config_flags |=
10094 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10096 bp->link_params.feature_config_flags &=
10097 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10099 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10100 bp->common.bc_ver = val;
10101 BNX2X_DEV_INFO("bc_ver %X\n", val);
10102 if (val < BNX2X_BC_VER) {
10103 /* for now only warn;
10104 * later we might need to enforce this */
10105 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10106 BNX2X_BC_VER, val);
10108 bp->link_params.feature_config_flags |=
10109 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
10110 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10112 bp->link_params.feature_config_flags |=
10113 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10114 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10115 bp->link_params.feature_config_flags |=
10116 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10117 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10118 bp->link_params.feature_config_flags |=
10119 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10120 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10122 bp->link_params.feature_config_flags |=
10123 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10124 FEATURE_CONFIG_MT_SUPPORT : 0;
10126 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10127 BC_SUPPORTS_PFC_STATS : 0;
10129 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10130 BC_SUPPORTS_FCOE_FEATURES : 0;
10132 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10133 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10134 boot_mode = SHMEM_RD(bp,
10135 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10136 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10137 switch (boot_mode) {
10138 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10139 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10141 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10142 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10144 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10145 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10147 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10148 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10152 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
10153 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10155 BNX2X_DEV_INFO("%sWoL capable\n",
10156 (bp->flags & NO_WOL_FLAG) ? "not " : "");
10158 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10159 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10160 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10161 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10163 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10164 val, val2, val3, val4);
10167 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10168 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
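/* Decode sketch for an IGU CAM entry using the macros above (illustrative
 * only, kept out of the build; the helper name is hypothetical): each
 * valid entry carries the owning function id and a vector number, where
 * vector 0 is the function's default status block.
 */
#if 0
static void example_decode_igu_cam(u32 cam_val)
{
	u8 fid = IGU_FID(cam_val);	/* owning function (PF encoding) */
	u8 vec = IGU_VEC(cam_val);	/* 0 = default SB, else fastpath SB */
}
#endif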
10170 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10172 int pfid = BP_FUNC(bp);
10175 u8 fid, igu_sb_cnt = 0;
10177 bp->igu_base_sb = 0xff;
10178 if (CHIP_INT_MODE_IS_BC(bp)) {
10179 int vn = BP_VN(bp);
10180 igu_sb_cnt = bp->igu_sb_cnt;
10181 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10184 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10185 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10190 /* IGU in normal mode - read CAM */
10191 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10193 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10194 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10196 fid = IGU_FID(val);
10197 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10198 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10200 if (IGU_VEC(val) == 0)
10201 /* default status block */
10202 bp->igu_dsb_id = igu_sb_id;
10204 if (bp->igu_base_sb == 0xff)
10205 bp->igu_base_sb = igu_sb_id;
10211 #ifdef CONFIG_PCI_MSI
10212 /* Due to the new PF resource allocation by MFW T7.4 and above, the
10213 * number of CAM entries may not be equal to the value advertised in
10214 * PCI.
10215 * The driver should use the minimal value of both as the actual status
10216 * block count.
10217 */
10218 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10221 if (igu_sb_cnt == 0) {
10222 BNX2X_ERR("CAM configuration error\n");
10229 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10231 int cfg_size = 0, idx, port = BP_PORT(bp);
10233 /* Aggregation of supported attributes of all external phys */
10234 bp->port.supported[0] = 0;
10235 bp->port.supported[1] = 0;
10236 switch (bp->link_params.num_phys) {
10238 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10242 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10246 if (bp->link_params.multi_phy_config &
10247 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10248 bp->port.supported[1] =
10249 bp->link_params.phy[EXT_PHY1].supported;
10250 bp->port.supported[0] =
10251 bp->link_params.phy[EXT_PHY2].supported;
10253 bp->port.supported[0] =
10254 bp->link_params.phy[EXT_PHY1].supported;
10255 bp->port.supported[1] =
10256 bp->link_params.phy[EXT_PHY2].supported;
10262 if (!(bp->port.supported[0] || bp->port.supported[1])) {
10263 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10265 dev_info.port_hw_config[port].external_phy_config),
10267 dev_info.port_hw_config[port].external_phy_config2));
10271 if (CHIP_IS_E3(bp))
10272 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10274 switch (switch_cfg) {
10275 case SWITCH_CFG_1G:
10276 bp->port.phy_addr = REG_RD(
10277 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10279 case SWITCH_CFG_10G:
10280 bp->port.phy_addr = REG_RD(
10281 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10284 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10285 bp->port.link_config[0]);
10289 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
10290 /* mask what we support according to speed_cap_mask per configuration */
10291 for (idx = 0; idx < cfg_size; idx++) {
10292 if (!(bp->link_params.speed_cap_mask[idx] &
10293 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
10294 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
10296 if (!(bp->link_params.speed_cap_mask[idx] &
10297 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
10298 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
10300 if (!(bp->link_params.speed_cap_mask[idx] &
10301 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
10302 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
10304 if (!(bp->link_params.speed_cap_mask[idx] &
10305 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
10306 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
10308 if (!(bp->link_params.speed_cap_mask[idx] &
10309 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
10310 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
10311 SUPPORTED_1000baseT_Full);
10313 if (!(bp->link_params.speed_cap_mask[idx] &
10314 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
10315 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
10317 if (!(bp->link_params.speed_cap_mask[idx] &
10318 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10319 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10323 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10324 bp->port.supported[1]);
10327 static void bnx2x_link_settings_requested(struct bnx2x *bp)
10329 u32 link_config, idx, cfg_size = 0;
10330 bp->port.advertising[0] = 0;
10331 bp->port.advertising[1] = 0;
10332 switch (bp->link_params.num_phys) {
10341 for (idx = 0; idx < cfg_size; idx++) {
10342 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10343 link_config = bp->port.link_config[idx];
10344 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
10345 case PORT_FEATURE_LINK_SPEED_AUTO:
10346 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10347 bp->link_params.req_line_speed[idx] =
10349 bp->port.advertising[idx] |=
10350 bp->port.supported[idx];
10351 if (bp->link_params.phy[EXT_PHY1].type ==
10352 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10353 bp->port.advertising[idx] |=
10354 (SUPPORTED_100baseT_Half |
10355 SUPPORTED_100baseT_Full);
10357 /* force 10G, no AN */
10358 bp->link_params.req_line_speed[idx] =
10360 bp->port.advertising[idx] |=
10361 (ADVERTISED_10000baseT_Full |
10367 case PORT_FEATURE_LINK_SPEED_10M_FULL:
10368 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10369 bp->link_params.req_line_speed[idx] =
10371 bp->port.advertising[idx] |=
10372 (ADVERTISED_10baseT_Full |
10375 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10377 bp->link_params.speed_cap_mask[idx]);
10382 case PORT_FEATURE_LINK_SPEED_10M_HALF:
10383 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10384 bp->link_params.req_line_speed[idx] =
10386 bp->link_params.req_duplex[idx] =
10388 bp->port.advertising[idx] |=
10389 (ADVERTISED_10baseT_Half |
10392 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10394 bp->link_params.speed_cap_mask[idx]);
10399 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10400 if (bp->port.supported[idx] &
10401 SUPPORTED_100baseT_Full) {
10402 bp->link_params.req_line_speed[idx] =
10404 bp->port.advertising[idx] |=
10405 (ADVERTISED_100baseT_Full |
10408 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10410 bp->link_params.speed_cap_mask[idx]);
10415 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10416 if (bp->port.supported[idx] &
10417 SUPPORTED_100baseT_Half) {
10418 bp->link_params.req_line_speed[idx] =
10420 bp->link_params.req_duplex[idx] =
10422 bp->port.advertising[idx] |=
10423 (ADVERTISED_100baseT_Half |
10426 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10428 bp->link_params.speed_cap_mask[idx]);
10433 case PORT_FEATURE_LINK_SPEED_1G:
10434 if (bp->port.supported[idx] &
10435 SUPPORTED_1000baseT_Full) {
10436 bp->link_params.req_line_speed[idx] =
10438 bp->port.advertising[idx] |=
10439 (ADVERTISED_1000baseT_Full |
10442 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10444 bp->link_params.speed_cap_mask[idx]);
10449 case PORT_FEATURE_LINK_SPEED_2_5G:
10450 if (bp->port.supported[idx] &
10451 SUPPORTED_2500baseX_Full) {
10452 bp->link_params.req_line_speed[idx] =
10454 bp->port.advertising[idx] |=
10455 (ADVERTISED_2500baseX_Full |
10458 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10460 bp->link_params.speed_cap_mask[idx]);
10465 case PORT_FEATURE_LINK_SPEED_10G_CX4:
10466 if (bp->port.supported[idx] &
10467 SUPPORTED_10000baseT_Full) {
10468 bp->link_params.req_line_speed[idx] =
10470 bp->port.advertising[idx] |=
10471 (ADVERTISED_10000baseT_Full |
10474 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10476 bp->link_params.speed_cap_mask[idx]);
10480 case PORT_FEATURE_LINK_SPEED_20G:
10481 bp->link_params.req_line_speed[idx] = SPEED_20000;
10485 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
10487 bp->link_params.req_line_speed[idx] =
10489 bp->port.advertising[idx] =
10490 bp->port.supported[idx];
10494 bp->link_params.req_flow_ctrl[idx] = (link_config &
10495 PORT_FEATURE_FLOW_CONTROL_MASK);
10496 if (bp->link_params.req_flow_ctrl[idx] ==
10497 BNX2X_FLOW_CTRL_AUTO) {
10498 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10499 bp->link_params.req_flow_ctrl[idx] =
10500 BNX2X_FLOW_CTRL_NONE;
10502 bnx2x_set_requested_fc(bp);
10505 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10506 bp->link_params.req_line_speed[idx],
10507 bp->link_params.req_duplex[idx],
10508 bp->link_params.req_flow_ctrl[idx],
10509 bp->port.advertising[idx]);
10513 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10515 mac_hi = cpu_to_be16(mac_hi);
10516 mac_lo = cpu_to_be32(mac_lo);
10517 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
10518 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
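/* Usage sketch (illustrative only, kept out of the build; the helper name
 * is hypothetical): shmem stores a MAC as a 16-bit "upper" and a 32-bit
 * "lower" word, and bnx2x_set_mac_buf() byte-swaps both so the buffer ends
 * up in network byte order.
 */
#if 0
static void example_mac_from_shmem(u8 *mac, u32 mac_upper, u32 mac_lower)
{
	bnx2x_set_mac_buf(mac, mac_lower, (u16)mac_upper);
	/* upper=0x0010, lower=0x18010203 gives 00:10:18:01:02:03 */
}
#endif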
10521 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10523 int port = BP_PORT(bp);
10525 u32 ext_phy_type, ext_phy_config, eee_mode;
10527 bp->link_params.bp = bp;
10528 bp->link_params.port = port;
10530 bp->link_params.lane_config =
10531 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
10533 bp->link_params.speed_cap_mask[0] =
10535 dev_info.port_hw_config[port].speed_capability_mask);
10536 bp->link_params.speed_cap_mask[1] =
10538 dev_info.port_hw_config[port].speed_capability_mask2);
10539 bp->port.link_config[0] =
10540 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10542 bp->port.link_config[1] =
10543 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
10545 bp->link_params.multi_phy_config =
10546 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
10547 /* If the device is capable of WoL, set the default state according
10548 * to the HW
10549 */
10550 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
10551 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10552 (config & PORT_FEATURE_WOL_ENABLED));
10554 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
10555 bp->link_params.lane_config,
10556 bp->link_params.speed_cap_mask[0],
10557 bp->port.link_config[0]);
10559 bp->link_params.switch_cfg = (bp->port.link_config[0] &
10560 PORT_FEATURE_CONNECTED_SWITCH_MASK);
10561 bnx2x_phy_probe(&bp->link_params);
10562 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
10564 bnx2x_link_settings_requested(bp);
10567 * If connected directly, work with the internal PHY, otherwise, work
10568 * with the external PHY
10572 dev_info.port_hw_config[port].external_phy_config);
10573 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
10574 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
10575 bp->mdio.prtad = bp->port.phy_addr;
10577 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10578 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10580 XGXS_EXT_PHY_ADDR(ext_phy_config);
10582 /* Configure link feature according to nvram value */
10583 eee_mode = (((SHMEM_RD(bp, dev_info.
10584 port_feature_config[port].eee_power_mode)) &
10585 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10586 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10587 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10588 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10589 EEE_MODE_ENABLE_LPI |
10590 EEE_MODE_OUTPUT_TIME;
10592 bp->link_params.eee_mode = 0;
10596 void bnx2x_get_iscsi_info(struct bnx2x *bp)
10598 u32 no_flags = NO_ISCSI_FLAG;
10599 int port = BP_PORT(bp);
10600 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10601 drv_lic_key[port].max_iscsi_conn);
10603 if (!CNIC_SUPPORT(bp)) {
10604 bp->flags |= no_flags;
10608 /* Get the number of maximum allowed iSCSI connections */
10609 bp->cnic_eth_dev.max_iscsi_conn =
10610 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10611 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10613 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10614 bp->cnic_eth_dev.max_iscsi_conn);
10617 * If maximum allowed number of connections is zero -
10618 * disable the feature.
10620 if (!bp->cnic_eth_dev.max_iscsi_conn)
10621 bp->flags |= no_flags;
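/* Decode sketch for the license word read above (illustrative only, kept
 * out of the build; the helper name is hypothetical): shmem keeps the
 * limit XORed with FW_ENCODE_32BIT_PATTERN, and the initiator connection
 * count sits in a masked, shifted field of the decoded word.
 */
#if 0
static u16 example_decode_max_conn(u32 raw_lic_word, u32 mask, u8 shift)
{
	u32 decoded = FW_ENCODE_32BIT_PATTERN ^ raw_lic_word;

	return (decoded & mask) >> shift;
}
#endif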
10625 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10628 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10629 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10630 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10631 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10634 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10635 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10636 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10637 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10639 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10641 int port = BP_PORT(bp);
10642 int func = BP_ABS_FUNC(bp);
10643 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10644 drv_lic_key[port].max_fcoe_conn);
10646 if (!CNIC_SUPPORT(bp)) {
10647 bp->flags |= NO_FCOE_FLAG;
10651 /* Get the number of maximum allowed FCoE connections */
10652 bp->cnic_eth_dev.max_fcoe_conn =
10653 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10654 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10656 /* Read the WWN: */
10659 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10661 dev_info.port_hw_config[port].
10662 fcoe_wwn_port_name_upper);
10663 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10665 dev_info.port_hw_config[port].
10666 fcoe_wwn_port_name_lower);
10669 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10671 dev_info.port_hw_config[port].
10672 fcoe_wwn_node_name_upper);
10673 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10675 dev_info.port_hw_config[port].
10676 fcoe_wwn_node_name_lower);
10677 } else if (!IS_MF_SD(bp)) {
10679 * Read the WWN info only if the FCoE feature is enabled for
10682 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
10683 bnx2x_get_ext_wwn_info(bp, func);
10685 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
10686 bnx2x_get_ext_wwn_info(bp, func);
10689 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10692 * If maximum allowed number of connections is zero -
10693 * disable the feature.
10695 if (!bp->cnic_eth_dev.max_fcoe_conn)
10696 bp->flags |= NO_FCOE_FLAG;
static void bnx2x_get_cnic_info(struct bnx2x *bp)
{
	/*
	 * iSCSI may be dynamically disabled, but reading the info here
	 * lets the driver reduce its memory usage if the feature is
	 * disabled for good.
	 */
	bnx2x_get_iscsi_info(bp);
	bnx2x_get_fcoe_info(bp);
}
static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);
	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
	u8 *fip_mac = bp->fip_mac;

	if (IS_MF(bp)) {
		/* iSCSI and FCoE NPAR MACs: if there is no iSCSI or
		 * FCoE MAC then the appropriate feature should be disabled.
		 * In non SD mode features configuration comes from struct
		 * func_ext_config.
		 */
		if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(iscsi_mac, val, val2);
				BNX2X_DEV_INFO
					("Read iSCSI MAC: %pM\n", iscsi_mac);
			} else {
				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
			}

			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 fcoe_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						fcoe_mac_addr_lower);
				bnx2x_set_mac_buf(fip_mac, val, val2);
				BNX2X_DEV_INFO
					("Read FCoE L2 MAC: %pM\n", fip_mac);
			} else {
				bp->flags |= NO_FCOE_FLAG;
			}

			bp->mf_ext_config = cfg;

		} else { /* SD MODE */
			if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
				/* use primary mac as iscsi mac */
				memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);

				BNX2X_DEV_INFO("SD ISCSI MODE\n");
				BNX2X_DEV_INFO
					("Read iSCSI MAC: %pM\n", iscsi_mac);
			} else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
				/* use primary mac as fip mac */
				memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
				BNX2X_DEV_INFO("SD FCoE MODE\n");
				BNX2X_DEV_INFO
					("Read FIP MAC: %pM\n", fip_mac);
			}
		}

		if (IS_MF_STORAGE_SD(bp))
			/* Zero primary MAC configuration */
			memset(bp->dev->dev_addr, 0, ETH_ALEN);

		if (IS_MF_FCOE_AFEX(bp))
			/* use FIP MAC as primary MAC */
			memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);

	} else {
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(iscsi_mac, val, val2);

		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				fcoe_fip_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       fcoe_fip_mac_lower);
		bnx2x_set_mac_buf(fip_mac, val, val2);
	}

	/* Disable iSCSI OOO if MAC configuration is invalid. */
	if (!is_valid_ether_addr(iscsi_mac)) {
		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
		memset(iscsi_mac, 0, ETH_ALEN);
	}

	/* Disable FCoE if MAC configuration is invalid. */
	if (!is_valid_ether_addr(fip_mac)) {
		bp->flags |= NO_FCOE_FLAG;
		memset(bp->fip_mac, 0, ETH_ALEN);
	}
}
static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	/* Zero primary MAC configuration */
	memset(bp->dev->dev_addr, 0, ETH_ALEN);

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		eth_hw_addr_random(bp->dev);
	} else if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

		if (CNIC_SUPPORT(bp))
			bnx2x_get_cnic_mac_hwinfo(bp);
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

		if (CNIC_SUPPORT(bp))
			bnx2x_get_cnic_mac_hwinfo(bp);
	}

	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

	if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
		dev_err(&bp->pdev->dev,
			"bad Ethernet MAC address configuration: %pM\n"
			"change it manually before bringing up the appropriate network interface\n",
			bp->dev->dev_addr);
}
static bool bnx2x_get_dropless_info(struct bnx2x *bp)
{
	int tmp;
	u32 cfg;

	if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
		/* Take function: tmp = func */
		tmp = BP_ABS_FUNC(bp);
		cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
		cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
	} else {
		/* Take port: tmp = port */
		tmp = BP_PORT(bp);
		cfg = SHMEM_RD(bp,
			       dev_info.port_hw_config[tmp].generic_features);
		cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
	}
	return cfg;
}
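
/* The value returned above is OR-ed together with the "dropless_fc"
 * module parameter into bp->dropless_fc in bnx2x_init_bp(): in MF mode
 * the per-function flag comes from func_ext_config, otherwise from the
 * per-port generic_features word, as the helper shows.
 */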
static int bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	/*
	 * initialize IGU parameters
	 */
	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
	} else {
		bp->common.int_block = INT_BLOCK_IGU;

		/* do not allow device reset during IGU info processing */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);

		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			int tout = 5000;

			BNX2X_DEV_INFO("FORCING Normal Mode\n");

			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);

			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				tout--;
				usleep_range(1000, 1000);
			}

			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
				dev_err(&bp->pdev->dev,
					"FORCING Normal Mode failed!!!\n");
				bnx2x_release_hw_lock(bp,
						      HW_LOCK_RESOURCE_RESET);
				return -EPERM;
			}
		}

		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			BNX2X_DEV_INFO("IGU Normal Mode\n");

		rc = bnx2x_get_igu_cam_info(bp);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
		if (rc)
			return rc;
	}

	/*
	 * set base FW non-default (fast path) status block id; this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
	else /*
	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
	      * the same queue are indicated on the same IGU SB). So we prefer
	      * FW and IGU SBs to be the same value.
	      */
		bp->base_fw_ndsb = bp->igu_base_sb;

	BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
		       bp->igu_sb_cnt, bp->base_fw_ndsb);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_VN(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
			       (u32)offsetof(struct shmem2_region, mf_cfg_addr));

		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes) */
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal MAC address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
				if ((!CHIP_IS_E1x(bp)) &&
				    (MF_CFG_RD(bp, func_mf_config[func].
					       mac_upper) != 0xffff) &&
				    (SHMEM2_HAS(bp,
						afex_driver_support))) {
					bp->mf_mode = MULTI_FUNCTION_AFEX;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else {
					BNX2X_DEV_INFO("can not configure afex mode\n");
				}
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					BNX2X_DEV_INFO("illegal OV for SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				bp->path_has_ovlan = true;

				BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				dev_err(&bp->pdev->dev,
					"No valid MF OV for func %d, aborting\n",
					func);
				return -EPERM;
			}
			break;
		case MULTI_FUNCTION_AFEX:
			BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
				       func);
			break;
		default:
			if (vn) {
				dev_err(&bp->pdev->dev,
					"VN %d is in a single function mode, aborting\n",
					vn);
				return -EPERM;
			}
			break;
		}

		/* check if other port on the path needs ovlan:
		 * Since MF configuration is shared between ports
		 * Possible mixed modes are only
		 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
		 */
		if (CHIP_MODE_IS_4_PORT(bp) &&
		    !bp->path_has_ovlan &&
		    !IS_MF(bp) &&
		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			u8 other_port = !BP_PORT(bp);
			u8 other_func = BP_PATH(bp) + 2*other_port;
			val = MF_CFG_RD(bp,
					func_mf_config[other_func].e1hov_tag);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
				bp->path_has_ovlan = true;
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/* port info */
	bnx2x_get_port_hwinfo(bp);

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

	bnx2x_get_cnic_info(bp);

	return rc;
}
static void bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_start[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	char *vpd_data;
	char *vpd_extended_data = NULL;
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* VPD RO tag should be first tag after identifier string, hence
	 * we should be able to find it in first BNX2X_VPD_LEN chars
	 */
	i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_start[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN) {
		vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
		if (vpd_extended_data == NULL)
			goto out_not_found;

		/* read rest of vpd image into vpd_extended_data */
		memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
		cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
				   block_end - BNX2X_VPD_LEN,
				   vpd_extended_data + BNX2X_VPD_LEN);
		if (cnt < (block_end - BNX2X_VPD_LEN))
			goto out_not_found;
		vpd_data = vpd_extended_data;
	} else
		vpd_data = vpd_start;

	/* now vpd_data holds full vpd content in both cases */

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		kfree(vpd_extended_data);
		return;
	}
out_not_found:
	kfree(vpd_extended_data);
	return;
}
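
/* Rough sketch of the VPD image parsed above (assuming a standard PCI
 * VPD layout; the keyword constants come from include/linux/pci.h):
 *
 *	[ identifier string tag ]
 *	[ VPD-R large resource tag (PCI_VPD_LRDT_RO_DATA) ]
 *		[ keyword PCI_VPD_RO_KEYWORD_MFR_ID,  len, data ]
 *		[ keyword PCI_VPD_RO_KEYWORD_VENDOR0, len, data ]
 *		...
 *
 * bnx2x_read_fwinfo() locates the VPD-R tag, checks that the MFR_ID
 * field matches the Dell vendor id and, if so, copies the VENDOR0
 * payload into bp->fw_ver.
 */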
static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
{
	int flags = 0;

	if (CHIP_REV_IS_FPGA(bp))
		SET_FLAGS(flags, MODE_FPGA);
	else if (CHIP_REV_IS_EMUL(bp))
		SET_FLAGS(flags, MODE_EMUL);
	else
		SET_FLAGS(flags, MODE_ASIC);

	if (CHIP_MODE_IS_4_PORT(bp))
		SET_FLAGS(flags, MODE_PORT4);
	else
		SET_FLAGS(flags, MODE_PORT2);

	if (CHIP_IS_E2(bp))
		SET_FLAGS(flags, MODE_E2);
	else if (CHIP_IS_E3(bp)) {
		SET_FLAGS(flags, MODE_E3);
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			SET_FLAGS(flags, MODE_E3_A0);
		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
	}

	if (IS_MF(bp)) {
		SET_FLAGS(flags, MODE_MF);
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			SET_FLAGS(flags, MODE_MF_SD);
			break;
		case MULTI_FUNCTION_SI:
			SET_FLAGS(flags, MODE_MF_SI);
			break;
		case MULTI_FUNCTION_AFEX:
			SET_FLAGS(flags, MODE_MF_AFEX);
			break;
		}
	} else
		SET_FLAGS(flags, MODE_SF);

#if defined(__LITTLE_ENDIAN)
	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
#else /*(__BIG_ENDIAN)*/
	SET_FLAGS(flags, MODE_BIG_ENDIAN);
#endif
	INIT_MODE_FLAGS(bp) = flags;
}
static int bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int rc;

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
	if (IS_PF(bp)) {
		rc = bnx2x_get_hwinfo(bp);
		if (rc)
			return rc;
	} else {
		random_ether_addr(bp->dev->dev_addr);
	}

	bnx2x_set_modes_bitmap(bp);

	rc = bnx2x_alloc_mem_bp(bp);
	if (rc)
		return rc;

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (IS_PF(bp) && !BP_NOMCP(bp)) {
		/* init fw_seq */
		bp->fw_seq =
			SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
							DRV_MSG_SEQ_NUMBER_MASK;
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);

		bnx2x_prev_unload(bp);
	}

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");

	bp->disable_tpa = disable_tpa;
	bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);

	/* Set TPA flags */
	if (bp->disable_tpa) {
		bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);

	bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
	if (IS_VF(bp))
		bp->rx_ring_size = MAX_RX_AVAIL;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
	    SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
		bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
		bnx2x_dcbx_init_params(bp);
	} else
		bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);

	if (CHIP_IS_E1x(bp))
		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
	else
		bp->cnic_base_cl_id = FP_SB_MAX_E2;

	/* multiple tx priority */
	if (IS_VF(bp))
		bp->max_cos = 1;
	else if (CHIP_IS_E1x(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
	else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
	else if (CHIP_IS_E3B0(bp))
		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
	else
		BNX2X_ERR("unknown chip %x revision %x\n",
			  CHIP_NUM(bp), CHIP_REV(bp));
	BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);

	/* We need at least one default status block for slow-path events,
	 * a second status block for the L2 queue, and a third status block
	 * for CNIC if supported.
	 */
	if (CNIC_SUPPORT(bp))
		bp->min_msix_vec_cnt = 3;
	else
		bp->min_msix_vec_cnt = 2;
	BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);

	return rc;
}
/****************************************************************************
* General service functions
****************************************************************************/

/*
 * net_device service functions
 */

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	bool global = false;
	int other_engine = BP_PATH(bp) ? 0 : 1;
	bool other_load_status, load_status;

	bp->stats_init = true;

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	/* If parity had happened during the unload, then attentions
	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
	 * want the first function loaded on the current engine to
	 * complete the recovery.
	 * Parity recovery is only relevant for PF driver.
	 */
	if (IS_PF(bp)) {
		other_load_status = bnx2x_get_load_status(bp, other_engine);
		load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
		if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
		    bnx2x_chk_parity_attn(bp, &global, true)) {
			do {
				/* If there are attentions and they are in
				 * global blocks, set the GLOBAL_RESET bit
				 * regardless of whether it will be this
				 * function that will complete the recovery.
				 */
				if (global)
					bnx2x_set_reset_global(bp);

				/* Only the first function on the current
				 * engine should try to recover in open. In
				 * case of attentions in global blocks only
				 * the first in the chip should try to recover.
				 */
				if ((!load_status &&
				     (!global || !other_load_status)) &&
				    bnx2x_trylock_leader_lock(bp) &&
				    !bnx2x_leader_reset(bp)) {
					netdev_info(bp->dev,
						    "Recovered in open\n");
					break;
				}

				/* recovery has failed... */
				bnx2x_set_power_state(bp, PCI_D3hot);
				bp->recovery_state = BNX2X_RECOVERY_FAILED;

				BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
					  "If you still see this message after a few retries then power cycle is required.\n");

				return -EAGAIN;
			} while (0);
		}
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;
	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);

	/* Power off */
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
				      struct bnx2x_mcast_ramrod_params *p)
{
	int mc_count = netdev_mc_count(bp->dev);
	struct bnx2x_mcast_list_elem *mc_mac =
		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
	struct netdev_hw_addr *ha;

	if (!mc_mac)
		return -ENOMEM;

	INIT_LIST_HEAD(&p->mcast_list);

	netdev_for_each_mc_addr(ha, bp->dev) {
		mc_mac->mac = bnx2x_mc_addr(ha);
		list_add_tail(&mc_mac->link, &p->mcast_list);
		mc_mac++;
	}

	p->mcast_list_len = mc_count;

	return 0;
}

static void bnx2x_free_mcast_macs_list(
	struct bnx2x_mcast_ramrod_params *p)
{
	struct bnx2x_mcast_list_elem *mc_mac =
		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
				 link);

	WARN_ON(!mc_mac);
	kfree(mc_mac);
}
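
/* Note: bnx2x_init_mcast_macs_list() allocates all list elements as one
 * contiguous array, so bnx2x_free_mcast_macs_list() above only needs to
 * kfree() the first entry of the list, which is also the base of that
 * array.
 */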
 * bnx2x_set_uc_list - configure a new unicast MACs list.
 *
 * @bp: driver handle
 *
 * We will use zero (0) as a MAC type for these MACs.
 */
static int bnx2x_set_uc_list(struct bnx2x *bp)
{
	int rc;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
	unsigned long ramrod_flags = 0;

	/* First schedule a cleanup of the old configuration */
	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
	if (rc < 0) {
		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
		return rc;
	}

	netdev_for_each_uc_addr(ha, dev) {
		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
				       BNX2X_UC_LIST_MAC, &ramrod_flags);
		if (rc == -EEXIST) {
			DP(BNX2X_MSG_SP,
			   "Failed to schedule ADD operations: %d\n", rc);
			/* do not treat adding same MAC as error */
			rc = 0;
		} else if (rc < 0) {
			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
				  rc);
			return rc;
		}
	}

	/* Execute the pending commands */
	__set_bit(RAMROD_CONT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
				 BNX2X_UC_LIST_MAC, &ramrod_flags);
}
static int bnx2x_set_mc_list(struct bnx2x *bp)
{
	struct net_device *dev = bp->dev;
	struct bnx2x_mcast_ramrod_params rparam = {NULL};
	int rc = 0;

	rparam.mcast_obj = &bp->mcast_obj;

	/* first, clear all configured multicast MACs */
	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0) {
		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
		return rc;
	}

	/* then, configure a new MACs list */
	if (netdev_mc_count(dev)) {
		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
		if (rc) {
			BNX2X_ERR("Failed to create multicast MACs list: %d\n",
				  rc);
			return rc;
		}

		/* Now add the new MACs */
		rc = bnx2x_config_mcast(bp, &rparam,
					BNX2X_MCAST_CMD_ADD);
		if (rc < 0)
			BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
				  rc);

		bnx2x_free_mcast_macs_list(&rparam);
	}

	return rc;
}
/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else {
		if (IS_PF(bp)) {
			/* some multicasts */
			if (bnx2x_set_mc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_ALLMULTI;

			if (bnx2x_set_uc_list(bp) < 0)
				rx_mode = BNX2X_RX_MODE_PROMISC;
		} else {
			/* configuring mcast to a vf involves sleeping (when we
			 * wait for the pf's response). Since this function is
			 * called from a non-sleepable context we must schedule
			 * a work item for this purpose
			 */
			smp_mb__before_clear_bit();
			set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
				&bp->sp_rtnl_state);
			smp_mb__after_clear_bit();
			schedule_delayed_work(&bp->sp_rtnl_task, 0);
		}
	}

	bp->rx_mode = rx_mode;
	/* handle ISCSI SD mode */
	if (IS_MF_ISCSI_SD(bp))
		bp->rx_mode = BNX2X_RX_MODE_NONE;

	/* Schedule the rx_mode command */
	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
		return;
	}

	if (IS_PF(bp)) {
		bnx2x_set_storm_rx_mode(bp);
	} else {
		/* configuring rx mode to storms in a vf involves sleeping
		 * (when we wait for the pf's response). Since this function is
		 * called from a non-sleepable context we must schedule
		 * a work item for this purpose
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
			&bp->sp_rtnl_state);
		smp_mb__after_clear_bit();
		schedule_delayed_work(&bp->sp_rtnl_task, 0);
	}
}
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK,
	   "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
	   prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
	}
}
#endif

static int bnx2x_validate_addr(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
		BNX2X_ERR("Non-valid Ethernet address\n");
		return -EADDRNOTAVAIL;
	}
	return 0;
}
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_select_queue	= bnx2x_select_queue,
	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= bnx2x_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_fix_features	= bnx2x_fix_features,
	.ndo_set_features	= bnx2x_set_features,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
	.ndo_setup_tc		= bnx2x_setup_tc,

#ifdef NETDEV_FCOE_WWNN
	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
#endif
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
{
	struct device *dev = &bp->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "System does not support DMA, aborting\n");
		return -EIO;
	}

	return 0;
}
static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
			  struct net_device *dev, unsigned long board_type)
{
	int rc;
	u32 pci_cfg_dword;
	bool chip_is_e1x = (board_type == BCM57710 ||
			    board_type == BCM57711 ||
			    board_type == BCM57711E);

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp->dev = dev;
	bp->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
	if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
	    PCICFG_REVESION_ID_ERROR_VAL) {
		pr_err("PCI device error, probably due to fan failure, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	if (IS_PF(bp)) {
		bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
		if (bp->pm_cap == 0) {
			dev_err(&bp->pdev->dev,
				"Cannot find power management capability, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}
	}

	if (!pci_is_pcie(pdev)) {
		dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	rc = bnx2x_set_coherency_mask(bp);
	if (rc)
		goto err_out_release;

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* In E1/E1H use the pci device function given by the kernel.
	 * In E2/E3 read the physical function from the ME register since
	 * these chips support Physical Device Assignment where the kernel
	 * BDF may be arbitrary (depending on the hypervisor).
	 */
	if (chip_is_e1x)
		bp->pf_num = PCI_FUNC(pdev->devfn);
	else {/* chip is E2/3*/
		pci_read_config_dword(bp->pdev,
				      PCICFG_ME_REGISTER, &pci_cfg_dword);
		bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
				  ME_REG_ABS_PF_NUM_SHIFT);
	}
	BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	/*
	 * Clean the following indirect addresses for all functions since they
	 * are not used by the driver.
	 */
	if (IS_PF(bp)) {
		REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
		REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);

		if (chip_is_e1x) {
			REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
			REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
		}

		/* Enable internal target-read (in case we are probed after PF
		 * FLR). Must be done prior to any BAR read access. Only for
		 * 57712 and up
		 */
		if (!chip_is_e1x)
			REG_WR(bp,
			       PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
	}

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
		NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;

	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;

	/* Add Loopback capability to the device */
	dev->hw_features |= NETIF_F_LOOPBACK;

#ifdef BCM_DCBNL
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width, int *speed)
{
	u32 val = 0;

	pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
		BNX2X_ERR("Wrong FW size\n");
		return -EINVAL;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			BNX2X_ERR("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			BNX2X_ERR("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			  fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			  BCM_5710_FW_MAJOR_VERSION,
			  BCM_5710_FW_MINOR_VERSION,
			  BCM_5710_FW_REVISION_VERSION,
			  BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/* IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
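
/* Worked example of the IRO packing consumed by bnx2x_prep_iro() above:
 * each entry occupies three big-endian 32-bit words,
 *
 *	word0: base (read as a full 32-bit word into target[i].base)
 *	word1: (m1 << 16) | m2
 *	word2: (m3 << 16) | size
 *
 * which matches the {base, m1, m2, m3, size} format described above.
 */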
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr)							\
		goto lbl;						\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)
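
/* Typical use (as in bnx2x_init_firmware() below): 'arr' names both a
 * section in the firmware file header and the bp-> member receiving the
 * converted copy, 'lbl' is the error label jumped to when kmalloc()
 * fails, and 'func' is one of the be*_to_cpu_n()/bnx2x_prep_*()
 * converters defined above, e.g.
 *
 *	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
 */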
static int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (bp->firmware)
		return 0;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (!CHIP_IS_E1x(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}
	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n",
			  fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);
	bp->firmware = NULL;

	return rc;
}

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
	bp->firmware = NULL;
}
static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
	.init_hw_cmn	  = bnx2x_init_hw_common,
	.init_hw_port	  = bnx2x_init_hw_port,
	.init_hw_func	  = bnx2x_init_hw_func,

	.reset_hw_cmn	  = bnx2x_reset_common,
	.reset_hw_port	  = bnx2x_reset_port,
	.reset_hw_func	  = bnx2x_reset_func,

	.gunzip_init	  = bnx2x_gunzip_init,
	.gunzip_end	  = bnx2x_gunzip_end,

	.init_fw	  = bnx2x_init_firmware,
	.release_fw	  = bnx2x_release_firmware,
};

void bnx2x__init_func_obj(struct bnx2x *bp)
{
	/* Prepare DMAE related driver resources */
	bnx2x_setup_dmae(bp);

	bnx2x_init_func_obj(bp, &bp->func_obj,
			    bnx2x_sp(bp, func_rdata),
			    bnx2x_sp_mapping(bp, func_rdata),
			    bnx2x_sp(bp, func_afex_rdata),
			    bnx2x_sp_mapping(bp, func_afex_rdata),
			    &bnx2x_func_sp_drv);
}
/* must be called after sriov-enable */
static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
{
	int cid_count = BNX2X_L2_MAX_CID(bp);

	if (IS_SRIOV(bp))
		cid_count += BNX2X_VF_CIDS;

	if (CNIC_SUPPORT(bp))
		cid_count += CNIC_CID_MAX;

	return roundup(cid_count, QM_CID_ROUND);
}
/**
 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
 *
 * @pdev: pci device
 *
 */
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
				     int cnic_cnt, bool is_vf)
{
	int pos, index;
	u16 control = 0;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

	/*
	 * If MSI-X is not supported - return the number of SBs needed to
	 * support one fast path queue: one FP queue + SB for CNIC
	 */
	if (!pos) {
		dev_info(&pdev->dev, "no msix capability found\n");
		return 1 + cnic_cnt;
	}
	dev_info(&pdev->dev, "msix capability found\n");

	/*
	 * The value in the PCI configuration space is the index of the last
	 * entry, namely one less than the actual size of the table, which is
	 * exactly what we want to return from this function: number of all SBs
	 * without the default SB.
	 * For VFs there is no default SB, so we return (index+1).
	 */
	pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);

	index = control & PCI_MSIX_FLAGS_QSIZE;

	return is_vf ? index + 1 : index;
}
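
/* Example: a PF whose MSI-X table has 17 entries reports 16 in the
 * PCI_MSIX_FLAGS_QSIZE field (table size minus one), which is exactly
 * the number of non-default SBs returned here; a VF has no default SB,
 * so the same table would yield index + 1 = 17.
 */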
static int set_max_cos_est(int chip_id)
{
	switch (chip_id) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		return BNX2X_MULTI_TX_COS_E1X;
	case BCM57712:
	case BCM57712_MF:
	case BCM57712_VF:
		return BNX2X_MULTI_TX_COS_E2_E3A0;
	case BCM57800:
	case BCM57800_MF:
	case BCM57800_VF:
	case BCM57810:
	case BCM57810_MF:
	case BCM57840_4_10:
	case BCM57840_2_20:
	case BCM57810_VF:
	case BCM57840_O:
	case BCM57840_MFO:
	case BCM57840_MF:
	case BCM57840_VF:
	case BCM57811:
	case BCM57811_MF:
	case BCM57811_VF:
		return BNX2X_MULTI_TX_COS_E3B0;
	default:
		pr_err("Unknown board_type (%d), aborting\n", chip_id);
		return -ENODEV;
	}
}

static int set_is_vf(int chip_id)
{
	switch (chip_id) {
	case BCM57712_VF:
	case BCM57800_VF:
	case BCM57810_VF:
	case BCM57840_VF:
	case BCM57811_VF:
		return true;
	default:
		return false;
	}
}
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);

static int bnx2x_init_one(struct pci_dev *pdev,
			  const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, max_non_def_sbs;
	int rx_count, tx_count, rss_count, doorbell_size;
	int max_cos_est;
	bool is_vf;
	int cnic_cnt;

	/* An estimated maximum supported CoS number according to the chip
	 * version.
	 * We will try to roughly estimate the maximum number of CoSes this chip
	 * may support in order to minimize the memory allocated for Tx
	 * netdev_queue's. This number will be accurately calculated during the
	 * initialization of bp->max_cos based on the chip versions AND chip
	 * revision in the bnx2x_init_bp().
	 */
	max_cos_est = set_max_cos_est(ent->driver_data);
	if (max_cos_est < 0)
		return max_cos_est;
	is_vf = set_is_vf(ent->driver_data);
	cnic_cnt = is_vf ? 0 : 1;

	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);

	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
	rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;

	if (rss_count < 1)
		return -EINVAL;

	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
	rx_count = rss_count + cnic_cnt;

	/* Maximum number of netdev Tx queues:
	 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
	 */
	tx_count = rss_count * max_cos_est + cnic_cnt;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	bp->flags = 0;
	if (is_vf)
		bp->flags |= IS_VF_FLAG;

	bp->igu_sb_cnt = max_non_def_sbs;
	bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
	bp->msg_enable = debug;
	bp->cnic_support = cnic_cnt;
	bp->cnic_probe = bnx2x_cnic_probe;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	BNX2X_DEV_INFO("This is a %s function\n",
		       IS_PF(bp) ? "physical" : "virtual");
	BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
	BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
	BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
		       tx_count, rx_count);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Map doorbells here as we need the real value of bp->max_cos which
	 * is initialized in bnx2x_init_bp() to determine the number of
	 * l2 connections.
	 */
	if (IS_VF(bp)) {
		/* vf doorbells are embedded within the regview */
		bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START;

		/* allocate vf2pf mailbox for vf to pf channel */
		BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
				sizeof(struct bnx2x_vf_mbx_msg));
	} else {
		doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
		if (doorbell_size > pci_resource_len(pdev, 2)) {
			dev_err(&bp->pdev->dev,
				"Cannot map doorbells, bar size too small, aborting\n");
			rc = -ENOMEM;
			goto init_one_exit;
		}
		bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
						doorbell_size);
	}
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto init_one_exit;
	}

	if (IS_VF(bp)) {
		rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
		if (rc)
			goto init_one_exit;
	}

	/* Enable SRIOV if capability found in configuration space.
	 * Once the generic SR-IOV framework makes it in from the
	 * pci tree this will be revised, to allow dynamic control
	 * over the number of VFs. Right now, change the num of vfs
	 * param below to enable SR-IOV.
	 */
	rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
	BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);

	/* disable FCOE L2 queue for E1x*/
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

	/* disable FCOE for 57840 device, until FW supports it */
	switch (ent->driver_data) {
	case BCM57840_O:
	case BCM57840_4_10:
	case BCM57840_2_20:
	case BCM57840_MFO:
	case BCM57840_MF:
		bp->flags |= NO_FCOE_FLAG;
	}

	/* Set bp->num_queues for MSI-X mode*/
	bnx2x_set_num_queues(bp);

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed.
	 */
	rc = bnx2x_set_int_mode(bp);
	if (rc) {
		dev_err(&pdev->dev, "Cannot set interrupts\n");
		goto init_one_exit;
	}

	/* register the net device */
	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}
	BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);

	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
		       pcie_width, pcie_speed);

	BNX2X_DEV_INFO(
		"%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

alloc_mem_err:
	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
		       sizeof(struct bnx2x_vf_mbx_msg));
	rc = -ENOMEM;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (IS_PF(bp) && bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}

#ifdef BCM_DCBNL
	/* Delete app tlvs from dcbnl */
	bnx2x_dcbnl_update_applist(bp, true);
#endif

	unregister_netdev(dev);

	/* Power on: we can't let PCI layer write to us while we are in D3 */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D0);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Power off */
	if (IS_PF(bp))
		bnx2x_set_power_state(bp, PCI_D3hot);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->sp_rtnl_task);

	bnx2x_iov_remove_one(bp);

	/* send message via vfpf channel to release the resources of this vf */
	if (IS_VF(bp))
		bnx2x_vfpf_release(bp);

	if (bp->regview)
		iounmap(bp->regview);

	/* For a vf, the doorbells are part of the regview and were unmapped
	 * along with it. FW is only loaded by the PF.
	 */
	if (IS_PF(bp)) {
		iounmap(bp->doorbells);

		bnx2x_release_firmware(bp);
	}
	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	if (CNIC_LOADED(bp))
		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);

	/* Stop Tx */
	bnx2x_tx_disable(bp);

	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);
	if (CNIC_LOADED(bp))
		bnx2x_del_all_napi_cnic(bp);

	del_timer_sync(&bp->timer);

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static const struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = bnx2x_remove_one,
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	struct list_head *pos, *q;

	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);

	/* Free globally allocated resources */
	list_for_each_safe(pos, q, &bnx2x_prev_list) {
		struct bnx2x_prev_path_list *tmp =
			list_entry(pos, struct bnx2x_prev_path_list, list);
		list_del(pos);
		kfree(tmp);
	}
}
void bnx2x_notify_link_changed(struct bnx2x *bp)
{
	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
/**
 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
 *
 * @bp: driver handle
 *
 * This function will wait until the ramrod completion returns.
 * Return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
{
	unsigned long ramrod_flags = 0;

	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
				 &bp->iscsi_l2_mac_obj, true,
				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
}
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;
	int cxt_index, cxt_offset;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;
		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
				cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
					ILT_PAGE_CIDS;
				cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
					(cxt_index * ILT_PAGE_CIDS);
				bnx2x_set_ctx_validation(bp,
					&bp->context[cxt_index].
							 vcxt[cxt_offset].eth,
					BNX2X_ISCSI_ETH_CID(bp));
			}
		}

		/*
		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
		 * in the air. We also check that the number of outstanding
		 * COMMON ramrods is not more than the EQ and SPQ can
		 * accommodate.
		 */
		if (type == ETH_CONNECTION_TYPE) {
			if (!atomic_read(&bp->cq_spq_left))
				break;
			else
				atomic_dec(&bp->cq_spq_left);
		} else if (type == NONE_CONNECTION_TYPE) {
			if (!atomic_read(&bp->eq_spq_left))
				break;
			else
				atomic_dec(&bp->eq_spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
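
/* A note on the accounting above: cq_spq_left/eq_spq_left bound the
 * L2/COMMON slow-path elements in flight, while cnic_spq_pending counts
 * iSCSI/FCoE kwqes posted to the SPQ and is only decremented at the top
 * of bnx2x_cnic_sp_post() when CNIC reports 'count' new completions.
 */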
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic)) {
		BNX2X_ERR("Can't post to SP queue while panic\n");
		return -EIO;
	}
#endif

	if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
	    (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
12934 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12936 struct cnic_ops *c_ops;
12939 mutex_lock(&bp->cnic_mutex);
12940 c_ops = rcu_dereference_protected(bp->cnic_ops,
12941 lockdep_is_held(&bp->cnic_mutex));
12943 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12944 mutex_unlock(&bp->cnic_mutex);
12949 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12951 struct cnic_ops *c_ops;
12955 c_ops = rcu_dereference(bp->cnic_ops);
12957 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
/* for commands that have no data */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
{
	struct cnic_ctl_info ctl = {0};

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;
	ctl.data.comp.error = err;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

/* Called with netif_addr_lock_bh() taken.
 * Sets an rx_mode config for an iSCSI ETH client.
 * Doesn't block.
 * Completion should be checked outside.
 */
static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
{
	unsigned long accept_flags = 0, ramrod_flags = 0;
	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;

	if (start) {
		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Queue to accept
		 * multicasts (in non-promiscuous mode only one Queue per
		 * function will receive multicast packets - leading in our
		 * case).
		 */
		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);

		/* Clear STOP_PENDING bit if START is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);

		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
	} else
		/* Clear START_PENDING bit if STOP is requested */
		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);

	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
		set_bit(sched_state, &bp->sp_state);
	else {
		__set_bit(RAMROD_RX, &ramrod_flags);
		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
				    ramrod_flags);
	}
}

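/* Control hook exported to the cnic module (cp->drv_ctl): dispatches
 * context-table writes, SPQ credit returns, iSCSI L2 ring start/stop
 * and ULP (un)registration requests coming from cnic.
 */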
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
		unsigned long sp_bits = 0;

		/* Configure the iSCSI classification object */
		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
				   cp->iscsi_l2_client_id,
				   cp->iscsi_l2_cid, BP_FUNC(bp),
				   bnx2x_sp(bp, mac_rdata),
				   bnx2x_sp_mapping(bp, mac_rdata),
				   BNX2X_FILTER_MAC_PENDING,
				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
				   &bp->macs_pool);

		/* Set iSCSI MAC address */
		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
		if (rc)
			break;

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring */

		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, true);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		unsigned long sp_bits = 0;

		/* Stop accepting on iSCSI L2 ring */
		netif_addr_lock_bh(dev);
		bnx2x_set_iscsi_eth_rx_mode(bp, false);
		netif_addr_unlock_bh(dev);

		/* bits to wait on */
		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);

		if (!bnx2x_wait_sp_comp(bp, sp_bits))
			BNX2X_ERR("rx_mode completion timed out!\n");

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
					BNX2X_ISCSI_ETH_MAC, true);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	case DRV_CTL_ULP_REGISTER_CMD: {
		int ulp_type = ctl->data.register_data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			int path = BP_PATH(bp);
			int port = BP_PORT(bp);
			int i;
			u32 scratch_offset;
			u32 *host_addr;

			/* first write capability to shmem2 */
			if (ulp_type == CNIC_ULP_ISCSI)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);

			if ((ulp_type != CNIC_ULP_FCOE) ||
			    (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
			    (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
				break;

			/* if reached here - should write fcoe capabilities */
			scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
			if (!scratch_offset)
				break;
			scratch_offset += offsetof(struct glob_ncsi_oem_data,
						   fcoe_features[path][port]);
			host_addr = (u32 *) &(ctl->data.register_data.
					      fcoe_features);
			for (i = 0; i < sizeof(struct fcoe_capabilities);
			     i += 4)
				REG_WR(bp, scratch_offset + i,
				       *(host_addr + i/4));
		}
		break;
	}

	case DRV_CTL_ULP_UNREGISTER_CMD: {
		int ulp_type = ctl->data.ulp_type;

		if (CHIP_IS_E3(bp)) {
			int idx = BP_FW_MB_IDX(bp);
			u32 cap;

			cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
			if (ulp_type == CNIC_ULP_ISCSI)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
			else if (ulp_type == CNIC_ULP_FCOE)
				cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
			SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
		}
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

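/* Describe to cnic which interrupt vector and status blocks to use:
 * irq_arr[0] is the CNIC-dedicated status block (MSI-X vector 1 when
 * MSI-X is in use), irq_arr[1] is the default status block.
 */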
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (!CHIP_IS_E1x(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

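/* Refresh the CID-layout parameters published to cnic. Presumably called
 * when the number of ethernet CIDs (and hence the ILT layout) changes,
 * so cnic's view of ctx_tbl_offset/starting_cid stays accurate.
 */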
void bnx2x_setup_cnic_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	int rc;

	DP(NETIF_MSG_IFUP, "Register_cnic called\n");

	if (ops == NULL) {
		BNX2X_ERR("NULL ops received\n");
		return -EINVAL;
	}

	if (!CNIC_SUPPORT(bp)) {
		BNX2X_ERR("Can't register CNIC when not supported\n");
		return -EOPNOTSUPP;
	}

	if (!CNIC_LOADED(bp)) {
		rc = bnx2x_load_cnic(bp);
		if (rc) {
			BNX2X_ERR("CNIC-related load failed\n");
			return rc;
		}
	}

	bp->cnic_enabled = true;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state |= CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

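/* Tear down the cnic registration: clear the ops pointer under
 * cnic_mutex, wait out RCU readers and free the kwq ring.
 */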
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	RCU_INIT_POINTER(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	bp->cnic_enabled = false;
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

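/* Probe entry used by the cnic module to discover this device's offload
 * capabilities and the driver callbacks it may invoke.
 */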
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled - return NULL in
	 * order to indicate CNIC that it should not try to work
	 * with this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
	cp->iscsi_l2_client_id =
		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	BNX2X_DEV_INFO(
		"page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
		cp->ctx_blk_size,
		cp->ctx_tbl_offset,
		cp->ctx_tbl_len,
		cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

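/* VF -> PF channel. Every bnx2x_vfpf_* request below follows the same
 * TLV exchange over the vf2pf mailbox (a sketch; see the functions
 * below for the real thing):
 *
 *	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_<op>, sizeof(*req));
 *	... fill in request-specific fields ...
 *	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 *	bnx2x_dp_tlv_list(bp, req);
 *	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
 *	... check rc and resp->hdr.status ...
 *
 * bnx2x_send_msg2pf() implements the doorbell-and-poll part of that
 * exchange.
 */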
int bnx2x_send_msg2pf(struct bnx2x *bp, u8 *done, dma_addr_t msg_mapping)
{
	struct cstorm_vf_zone_data __iomem *zone_data =
		REG_ADDR(bp, PXP_VF_ADDR_CSDM_GLOBAL_START);
	int tout = 600, interval = 100; /* wait for 60 seconds */

	if (*done) {
		BNX2X_ERR("done was non-zero before message to pf was sent\n");
		WARN_ON(true);
		return -EINVAL;
	}

	/* Write message address */
	writel(U64_LO(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_lo);
	writel(U64_HI(msg_mapping),
	       &zone_data->non_trigger.vf_pf_channel.msg_addr_hi);

	/* make sure the address is written before FW accesses it */
	wmb();

	/* Trigger the PF FW */
	writeb(1, &zone_data->trigger.vf_pf_channel.addr_valid);

	/* Wait for PF to complete */
	while ((tout >= 0) && (!*done)) {
		msleep(interval);
		tout -= 1;

		/* progress indicator - HV can take its own sweet time in
		 * answering VMs...
		 */
		DP_CONT(BNX2X_MSG_IOV, ".");
	}

	if (!*done) {
		BNX2X_ERR("PF response has timed out\n");
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "Got a response from PF\n");
	return 0;
}

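/* Learn our VF id: a VF read of the doorbell BAR is trapped by the PXP
 * and returns the ME register, which encodes the VF number once the PF
 * driver has set the channel up.
 */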
int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
{
	u32 me_reg;
	int tout = 10, interval = 100; /* Wait for 1 sec */

	do {
		/* pxp traps vf read of doorbells and returns me reg value */
		me_reg = readl(bp->doorbells);
		if (GOOD_ME_REG(me_reg))
			break;

		msleep(interval);

		BNX2X_ERR("Invalid ME register value: 0x%08x. Is pf driver up?\n",
			  me_reg);
	} while (tout-- > 0);

	if (!GOOD_ME_REG(me_reg)) {
		BNX2X_ERR("Invalid ME register value: 0x%08x\n", me_reg);
		return -EINVAL;
	}

	DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);

	*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;

	return 0;
}

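/* ACQUIRE: first request in the VF lifecycle. Asks the PF for queues,
 * status blocks and filters; if the PF answers NO_RESOURCE the request
 * is retried with the PF-recommended amounts (up to VF_ACQUIRE_THRESH
 * attempts) before giving up.
 */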
int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
{
	int rc = 0, attempts = 0;
	struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire;
	struct pfvf_acquire_resp_tlv *resp = &bp->vf2pf_mbox->resp.acquire_resp;
	u32 vf_id;
	bool resources_acquired = false;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id))
		return -EAGAIN;

	req->vfdev_info.vf_id = vf_id;
	req->vfdev_info.vf_os = 0;

	req->resc_request.num_rxqs = rx_count;
	req->resc_request.num_txqs = tx_count;
	req->resc_request.num_sbs = bp->igu_sb_cnt;
	req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS;
	req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	while (!resources_acquired) {
		DP(BNX2X_MSG_SP, "attempting to acquire resources\n");

		/* send acquire request */
		rc = bnx2x_send_msg2pf(bp,
				       &resp->hdr.status,
				       bp->vf2pf_mbox_mapping);

		/* PF timeout */
		if (rc)
			return rc;

		/* copy acquire response from buffer to bp */
		memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));

		attempts++;

		/* test whether the PF accepted our request. If not, humble
		 * the request and try again.
		 */
		if (bp->acquire_resp.hdr.status == PFVF_STATUS_SUCCESS) {
			DP(BNX2X_MSG_SP, "resources acquired\n");
			resources_acquired = true;
		} else if (bp->acquire_resp.hdr.status ==
			   PFVF_STATUS_NO_RESOURCE &&
			   attempts < VF_ACQUIRE_THRESH) {
			DP(BNX2X_MSG_SP,
			   "PF unwilling to fulfill resource request. Try PF recommended amount\n");

			/* humble our request */
			req->resc_request.num_txqs =
				bp->acquire_resp.resc.num_txqs;
			req->resc_request.num_rxqs =
				bp->acquire_resp.resc.num_rxqs;
			req->resc_request.num_sbs =
				bp->acquire_resp.resc.num_sbs;
			req->resc_request.num_mac_filters =
				bp->acquire_resp.resc.num_mac_filters;
			req->resc_request.num_vlan_filters =
				bp->acquire_resp.resc.num_vlan_filters;
			req->resc_request.num_mc_filters =
				bp->acquire_resp.resc.num_mc_filters;

			/* Clear response buffer */
			memset(&bp->vf2pf_mbox->resp, 0,
			       sizeof(union pfvf_tlvs));
		} else {
			/* PF reports error */
			BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
				  bp->acquire_resp.hdr.status);
			return -EAGAIN;
		}
	}

	/* get HW info */
	bp->common.chip_id |= (bp->acquire_resp.pfdev_info.chip_num & 0xffff);
	bp->link_params.chip_id = bp->common.chip_id;
	bp->db_size = bp->acquire_resp.pfdev_info.db_size;
	bp->common.int_block = INT_BLOCK_IGU;
	bp->common.chip_port_mode = CHIP_2_PORT_MODE;
	bp->igu_dsb_id = -1;
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	bp->common.flash_size = 0;
	bp->flags |=
		NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
	bp->igu_sb_cnt = 1;
	bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
	strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
		sizeof(bp->fw_ver));

	if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
		memcpy(bp->dev->dev_addr,
		       bp->acquire_resp.resc.current_mac_addr,
		       ETH_ALEN);

	return 0;
}

int bnx2x_vfpf_release(struct bnx2x *bp)
{
	struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;
	u32 vf_id;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));

	if (bnx2x_get_vf_id(bp, &vf_id))
		return -EAGAIN;

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send release request */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		/* PF timeout */
		return rc;

	if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
		/* PF released us */
		DP(BNX2X_MSG_SP, "vf released\n");
	} else {
		/* PF reports error */
		BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
			  resp->hdr.status);
		return -EAGAIN;
	}

	return 0;
}

/* Tell PF about SB addresses */
int bnx2x_vfpf_init(struct bnx2x *bp)
{
	struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req));

	/* status blocks */
	for_each_eth_queue(bp, i)
		req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i,
						       status_blk_mapping);

	/* statistics - the request only supports a single queue for now */
	req->stats_addr = bp->fw_stats_data_mapping +
			  offsetof(struct bnx2x_fw_stats_data, queue_stats);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		return rc;

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
			  resp->hdr.status);
		return -EAGAIN;
	}

	DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
	return 0;
}

/* CLOSE VF - opposite to INIT_VF */
void bnx2x_vfpf_close_vf(struct bnx2x *bp)
{
	struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int i, rc;
	u32 vf_id;

	/* If we haven't got a valid VF id, there is no sense to
	 * continue with sending messages
	 */
	if (bnx2x_get_vf_id(bp, &vf_id))
		goto free_irq;

	/* Close the queues */
	for_each_queue(bp, i)
		bnx2x_vfpf_teardown_queue(bp, i);

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req));

	req->vf_id = vf_id;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending CLOSE failed. rc was: %d\n", rc);
	else if (resp->hdr.status != PFVF_STATUS_SUCCESS)
		BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
			  resp->hdr.status);

free_irq:
	/* Disable HW interrupts, NAPI */
	bnx2x_netif_stop(bp, 0);
	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Release IRQs */
	bnx2x_free_irq(bp);
}

/* ask the pf to open a queue for the vf */
int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
{
	struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
	u16 tpa_agg_size = 0, flags = 0;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));

	/* select tpa mode to request */
	if (!fp->disable_tpa) {
		flags |= VFPF_QUEUE_FLG_TPA;
		flags |= VFPF_QUEUE_FLG_TPA_IPV6;
		if (fp->mode == TPA_MODE_GRO)
			flags |= VFPF_QUEUE_FLG_TPA_GRO;
		tpa_agg_size = TPA_AGG_SIZE;
	}

	/* calculate queue flags */
	flags |= VFPF_QUEUE_FLG_STATS;
	flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
	flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
	flags |= VFPF_QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	/* Common */
	req->vf_qid = fp_idx;
	req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID;

	/* Rx */
	req->rxq.rcq_addr = fp->rx_comp_mapping;
	req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	req->rxq.rxq_addr = fp->rx_desc_mapping;
	req->rxq.sge_addr = fp->rx_sge_mapping;
	req->rxq.vf_sb = fp_idx;
	req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS;
	req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0;
	req->rxq.mtu = bp->dev->mtu;
	req->rxq.buf_sz = fp->rx_buf_size;
	req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE;
	req->rxq.tpa_agg_sz = tpa_agg_size;
	req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
	req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
	req->rxq.flags = flags;
	req->rxq.drop_flags = 0;
	req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	req->rxq.stat_id = -1; /* No stats at the moment */

	/* Tx */
	req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping;
	req->txq.vf_sb = fp_idx;
	req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0;
	req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0;
	req->txq.flags = flags;
	req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending SETUP_Q message for queue[%d] failed!\n",
			  fp_idx);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
			  fp_idx, resp->hdr.status);
		return -EINVAL;
	}
	return rc;
}

int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
{
	struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q,
			sizeof(*req));

	req->vf_qid = qidx;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
			  rc);
		return rc;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
			  resp->hdr.status);
		return -EINVAL;
	}

	return 0;
}

/* request pf to add a mac for the vf */
int bnx2x_vfpf_set_mac(struct bnx2x *bp)
{
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED;
	req->vf_qid = 0;
	req->n_mac_vlan_filters = 1;
	req->filters[0].flags =
		VFPF_Q_FILTER_DEST_MAC_VALID | VFPF_Q_FILTER_SET_MAC;

	/* copy mac from device to request */
	memcpy(req->filters[0].mac, bp->dev->dev_addr, ETH_ALEN);

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	/* send message to pf */
	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
		return rc;
	}

	/* PF failed the transaction */
	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
		return -EINVAL;
	}

	return 0;
}

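/* Push the net_device multicast list to the PF as a SET_Q_FILTERS
 * request (vf_qid 0 carries the filter configuration for the VF).
 */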
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc, i = 0;
	struct netdev_hw_addr *ha;

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return -EINVAL;
	}

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	/* Get Rx mode requested */
	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	/* We support PFVF_MAX_MULTICAST_PER_VF multicast addresses at most,
	 * so check the bound before copying anything into req->multicast[].
	 */
	if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
		DP(NETIF_MSG_IFUP,
		   "VF supports no more than %d multicast MAC addresses\n",
		   PFVF_MAX_MULTICAST_PER_VF);
		return -EINVAL;
	}

	netdev_for_each_mc_addr(ha, dev) {
		DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
		   bnx2x_mc_addr(ha));
		memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN);
		i++;
	}

	req->n_multicast = i;
	req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc) {
		BNX2X_ERR("Sending a message failed: %d\n", rc);
		return rc;
	}

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
			  resp->hdr.status);
		return -EINVAL;
	}

	return 0;
}

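/* Translate bp->rx_mode into the corresponding VFPF_RX_MASK_* bits and
 * request the PF to apply them on the VF's behalf.
 */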
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
	struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
	int rc;

	/* clear mailbox and prep first tlv */
	bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
			sizeof(*req));

	DP(NETIF_MSG_IFUP, "Rx mode is %d\n", mode);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE;
		break;
	case BNX2X_RX_MODE_NORMAL:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	case BNX2X_RX_MODE_PROMISC:
		req->rx_mask = VFPF_RX_MASK_ACCEPT_ALL_UNICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_ALL_MULTICAST;
		req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		return -EINVAL;
	}

	req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
	req->vf_qid = 0;

	/* add list termination tlv */
	bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* output tlvs list */
	bnx2x_dp_tlv_list(bp, req);

	rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
	if (rc)
		BNX2X_ERR("Sending a message failed: %d\n", rc);

	if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
		BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
		return -EINVAL;
	}

	return rc;
}