/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2012 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
59 #include "bnx2x_init.h"
60 #include "bnx2x_init_ops.h"
61 #include "bnx2x_cmn.h"
62 #include "bnx2x_dcb.h"
65 #include <linux/firmware.h>
66 #include "bnx2x_fw_file_hdr.h"
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
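/* Illustrative expansion (hypothetical version numbers): if the four
 * version macros above were 7, 8, 2 and 0, the __stringify() chain
 * would paste them into the single literal "7.8.2.0", so FW_FILE_NAME_E1
 * below would resolve to "bnx2x/bnx2x-e1-7.8.2.0.fw".
 */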
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
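/* Worked example: ETH_ALEN is 6 and sizeof(u32) is 4, so ALIGN(6, 4)
 * rounds up to 8 and MAC_LEADING_ZERO_CNT evaluates to 8 - 6 = 2 zero
 * bytes in front of a MAC address kept in u32-aligned storage.
 */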
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/"
		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
		   "57840/57840_MF Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues,
		 " Set number of queues (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
#define INT_MODE_INTx		1
#define INT_MODE_MSI		2
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
		 "(1 INT#x; 2 MSI)");
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
	BCM57712,
	BCM57712_MF,
	BCM57800,
	BCM57800_MF,
	BCM57810,
	BCM57810_MF,
	BCM57840_O,
	BCM57840_4_10,
	BCM57840_2_20,
	BCM57840_MFO,
	BCM57840_MF,
	BCM57811,
	BCM57811_MF,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
	{ "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
};
#ifndef PCI_DEVICE_ID_NX2_57710
#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
#endif
#ifndef PCI_DEVICE_ID_NX2_57711
#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
#endif
#ifndef PCI_DEVICE_ID_NX2_57711E
#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
#endif
#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
#endif
#ifndef PCI_DEVICE_ID_NX2_57712_MF
#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57800
#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
#endif
#ifndef PCI_DEVICE_ID_NX2_57800_MF
#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57810
#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
#endif
#ifndef PCI_DEVICE_ID_NX2_57810_MF
#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_O
#define PCI_DEVICE_ID_NX2_57840_O	CHIP_NUM_57840_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_4_10
#define PCI_DEVICE_ID_NX2_57840_4_10	CHIP_NUM_57840_4_10
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_2_20
#define PCI_DEVICE_ID_NX2_57840_2_20	CHIP_NUM_57840_2_20
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MFO
#define PCI_DEVICE_ID_NX2_57840_MFO	CHIP_NUM_57840_MF_OBSOLETE
#endif
#ifndef PCI_DEVICE_ID_NX2_57840_MF
#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
#endif
#ifndef PCI_DEVICE_ID_NX2_57811
#define PCI_DEVICE_ID_NX2_57811		CHIP_NUM_57811
#endif
#ifndef PCI_DEVICE_ID_NX2_57811_MF
#define PCI_DEVICE_ID_NX2_57811_MF	CHIP_NUM_57811_MF
#endif
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/* Global resources for unloading a previously loaded device */
#define BNX2X_PREV_WAIT_NEEDED 1
static DEFINE_SEMAPHORE(bnx2x_prev_sem);
static LIST_HEAD(bnx2x_prev_list);

/****************************************************************************
* General service functions
****************************************************************************/
static void __storm_memset_dma_mapping(struct bnx2x *bp,
				       u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static void storm_memset_spq_addr(struct bnx2x *bp,
				  dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
				  u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
				 u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static void storm_memset_eq_data(struct bnx2x *bp,
				 struct event_ring_data *eq_data,
				 u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
				 u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
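/* Note on the two helpers above: they tunnel GRC register accesses
 * through the PCI config-space window - PCICFG_GRC_ADDRESS selects the
 * target register and PCICFG_GRC_DATA moves the data - and then park
 * the window back at PCICFG_VENDOR_ID_OFFSET so that a stray config
 * access afterwards hits a harmless location.
 */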
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"
/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
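/* Illustrative composition (arguments as used later in this file): a
 * PCI -> GRC transfer with a PCI-side completion is built as
 *	bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, true, DMAE_COMP_PCI);
 * which is exactly the call bnx2x_prep_dmae_with_comp() makes below.
 */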
static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
	int rc = 0;

	/*
	 * Lock the dmae channel. Disable BHs to prevent a dead-lock
	 * as long as this code is called both from syscall context and
	 * from ndo_set_rx_mode() flow that may be called from BH.
	 */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		if (!cnt ||
		    (bp->recovery_state != BNX2X_RECOVERY_DONE &&
		     bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		if (CHIP_IS_E1(bp))
			bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		else
			bnx2x_init_str_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		if (CHIP_IS_E1(bp))
			for (i = 0; i < len32; i++)
				data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		else
			for (i = 0; i < len32; i++)
				data[i] = REG_RD(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
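/* Chunking sketch for the helper above: with a per-command cap of
 * dmae_wr_max dwords, a request of dmae_wr_max + k dwords is issued as
 * one full-size DMAE plus a k-dword tail; the byte offset advances by
 * dmae_wr_max * 4 because len counts 32-bit words while addresses are
 * in bytes.
 */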
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
{
	u32 addr, val;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}
	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
		(bp->common.bc_ver & 0xff0000) >> 16,
		(bp->common.bc_ver & 0xff00) >> 8,
		(bp->common.bc_ver & 0xff));

	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
		BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x800;

	/* validate TRCB signature */
	mark = REG_RD(bp, addr);
	if (mark != MFW_TRACE_SIGNATURE) {
		BNX2X_ERR("Trace buffer signature is missing.");
		return;
	}

	/* read cyclic buffer pointer */
	addr += 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);

	printk("%s", lvl);
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	printk("%s" "end of fw dump\n", lvl);
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	bnx2x_fw_dump_lvl(bp, KERN_ERR);
}
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
	u8 cos;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	bp->eth_stats.unrecoverable_error++;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
		  bp->def_idx, bp->def_att_idx, bp->attn_state,
		  bp->spq_prod_idx, bp->stats_counter);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid,
		sp_sb_data.state);
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.common.state_machine :
			sb_data_e2.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E1x(bp) ?
			sb_data_e1x.index_data :
			sb_data_e2.index_data;
		u8 data_size, cos;
		u32 *sb_data_p;
		struct bnx2x_fp_txdata txdata;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		for_each_cos_in_tx_queue(fp, cos)
		{
			txdata = *fp->txdata_ptr[cos];
			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
				  i, txdata.tx_pkt_prod,
				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
				  txdata.tx_bd_cons,
				  le16_to_cpu(*txdata.tx_cons_sb));
		}

		loop = CHIP_IS_E1x(bp) ?
			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;

		/* host sb data */

		if (IS_FCOE_FP(fp))
			continue;

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");

		/* fw sb data */
		data_size = CHIP_IS_E1x(bp) ?
			sizeof(struct hc_status_block_data_e1x) :
			sizeof(struct hc_status_block_data_e2);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E1x(bp) ?
			(u32 *)&sb_data_e1x :
			(u32 *)&sb_data_e2;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (!CHIP_IS_E1x(bp)) {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b,
				sb_data_e2.common.state);
		} else {
			pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b,
				sb_data_e1x.common.state);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
				j, hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indexes data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}
#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_valid_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->data);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_valid_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				struct sw_tx_bd *sw_bd =
					&txdata->tx_buf_ring[j];

				BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
					  i, cos, j, sw_bd->skb,
					  sw_bd->first_bd);
			}

			start = TX_BD(txdata->tx_bd_cons - 10);
			end = TX_BD(txdata->tx_bd_cons + 254);
			for (j = start; j != end; j = TX_BD(j + 1)) {
				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];

				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
					  i, cos, j, tx_bd[0], tx_bd[1],
					  tx_bd[2], tx_bd[3]);
			}
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
/* bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
 * init part.
 */
#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
#define FLR_WAIT_INTERVAL	50	/* usec */
#define FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
struct pbf_pN_buf_regs {
	int pN;
	u32 init_crd;
	u32 crd;
	u32 crd_freed;
};

struct pbf_pN_cmd_regs {
	int pN;
	u32 lines_occup;
	u32 lines_freed;
};
static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
				     struct pbf_pN_buf_regs *regs,
				     u32 poll_count)
{
	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
	u32 cur_cnt = poll_count;

	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
	crd = crd_start = REG_RD(bp, regs->crd);
	init_crd = REG_RD(bp, regs->init_crd);

	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
	DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);

	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
	       (init_crd - crd_start))) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			crd = REG_RD(bp, regs->crd);
			crd_freed = REG_RD(bp, regs->crd_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
			   regs->pN, crd);
			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
			   regs->pN, crd_freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
				     struct pbf_pN_cmd_regs *regs,
				     u32 poll_count)
{
	u32 occup, to_free, freed, freed_start;
	u32 cur_cnt = poll_count;

	occup = to_free = REG_RD(bp, regs->lines_occup);
	freed = freed_start = REG_RD(bp, regs->lines_freed);

	DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);

	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
		if (cur_cnt--) {
			udelay(FLR_WAIT_INTERVAL);
			occup = REG_RD(bp, regs->lines_occup);
			freed = REG_RD(bp, regs->lines_freed);
		} else {
			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
			   regs->pN);
			DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
			   regs->pN, occup);
			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
			   regs->pN, freed);
			break;
		}
	}
	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
	   poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
				    u32 expected, u32 poll_count)
{
	u32 cur_cnt = poll_count;
	u32 val;

	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
		udelay(FLR_WAIT_INTERVAL);

	return val;
}

static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
					   char *msg, u32 poll_cnt)
{
	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
	if (val != 0) {
		BNX2X_ERR("%s usage count=%d\n", msg, val);
		return 1;
	}
	return 0;
}
static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
{
	/* adjust polling timeout */
	if (CHIP_REV_IS_EMUL(bp))
		return FLR_POLL_CNT * 2000;

	if (CHIP_REV_IS_FPGA(bp))
		return FLR_POLL_CNT * 120;

	return FLR_POLL_CNT;
}
static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
{
	struct pbf_pN_cmd_regs cmd_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q0 :
			PBF_REG_P0_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
			PBF_REG_P0_TQ_LINES_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_Q1 :
			PBF_REG_P1_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
			PBF_REG_P1_TQ_LINES_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_OCCUPANCY_LB_Q :
			PBF_REG_P4_TQ_OCCUPANCY,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
			PBF_REG_P4_TQ_LINES_FREED_CNT}
	};

	struct pbf_pN_buf_regs buf_regs[] = {
		{0, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q0 :
			PBF_REG_P0_INIT_CRD ,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q0 :
			PBF_REG_P0_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
		{1, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_Q1 :
			PBF_REG_P1_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_Q1 :
			PBF_REG_P1_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
		{4, (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INIT_CRD_LB_Q :
			PBF_REG_P4_INIT_CRD,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_CREDIT_LB_Q :
			PBF_REG_P4_CREDIT,
		    (CHIP_IS_E3B0(bp)) ?
			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
	};

	int i;

	/* Verify the command queues are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);

	/* Verify the transmission buffers are flushed P0, P1, P4 */
	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
}
#define OP_GEN_PARAM(param) \
	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)

#define OP_GEN_TYPE(type) \
	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)

#define OP_GEN_AGG_VECT(index) \
	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
				  u32 poll_cnt)
{
	struct sdm_op_gen op_gen = {0};

	u32 comp_addr = BAR_CSTRORM_INTMEM +
			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
	int ret = 0;

	if (REG_RD(bp, comp_addr)) {
		BNX2X_ERR("Cleanup complete was not 0 before sending\n");
		return 1;
	}

	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;

	DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);

	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
		BNX2X_ERR("FW final cleanup did not succeed\n");
		DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
		   (REG_RD(bp, comp_addr)));
		ret = 1;
	}
	/* Zero completion for next FLR */
	REG_WR(bp, comp_addr, 0);

	return ret;
}
static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
{
	u16 status;

	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
	return status & PCI_EXP_DEVSTA_TRPND;
}
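/* PCI_EXP_DEVSTA_TRPND is the "transactions pending" bit of the PCIe
 * Device Status register, so a non-zero return here means the function
 * still has non-posted requests outstanding.
 */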
/* PF FLR specific routines */
static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
{
	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			CFC_REG_NUM_LCIDS_INSIDE_PF,
			"CFC PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			DORQ_REG_PF_USAGE_CNT,
			"DQ PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
			"QM PF usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
			"Timers VNIC usage counter timed out",
			poll_cnt))
		return 1;
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
			"Timers NUM_SCANS usage counter timed out",
			poll_cnt))
		return 1;

	/* Wait DMAE PF usage counter to zero */
	if (bnx2x_flr_clnup_poll_hw_counter(bp,
			dmae_reg_go_c[INIT_DMAE_C(bp)],
			"DMAE command register timed out",
			poll_cnt))
		return 1;

	return 0;
}
static void bnx2x_hw_enable_status(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, PBF_REG_DISABLE_PF);
	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);

	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);

	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
	   val);
}
static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
{
	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);

	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));

	/* Re-enable PF target read access */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);

	/* Poll HW usage counters */
	DP(BNX2X_MSG_SP, "Polling usage counters\n");
	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
		return -EBUSY;

	/* Zero the igu 'trailing edge' and 'leading edge' */

	/* Send the FW cleanup command */
	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
		return -EBUSY;

	/* Verify TX hw is flushed */
	bnx2x_tx_hw_flushed(bp, poll_cnt);

	/* Wait 100ms (not adjusted according to platform) */
	msleep(100);

	/* Verify no pending pci transactions */
	if (bnx2x_is_pcie_pending(bp->pdev))
		BNX2X_ERR("PCIE Transactions still pending\n");

	bnx2x_hw_enable_status(bp);

	/*
	 * Master enable - Due to WB DMAE writes performed before this
	 * register is re-initialized as part of the regular function init
	 */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	return 0;
}
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		if (single_msix)
			val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_IFUP,
			   "write %x to HC %d (addr 0x%x)\n", val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_IFUP,
	   "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
	   (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_VN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
	bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
	bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);

		if (single_msix)
			val |= IGU_PF_CONF_SINGLE_ISR_EN;
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	if (val & IGU_PF_CONF_INT_LINE_EN)
		pci_intx(bp->pdev, true);

	mmiowb();
	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_VN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_IFDOWN,
	   "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		if (CNIC_SUPPORT(bp))
			offset++;
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[offset++].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	cancel_delayed_work(&bp->period_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */
/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
	   "Failed to get a lock on resource %d\n", resource);
	return false;
}
/**
 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
 *
 * @bp:	driver handle
 *
 * Returns the recovery leader resource id according to the engine this
 * function belongs to. Currently only 2 engines are supported.
 */
static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
{
	if (BP_PATH(bp))
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
	else
		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
}
/**
 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
 *
 * @bp:	driver handle
 *
 * Tries to acquire a leader lock for current engine.
 */
static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
{
	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
	struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command) {
	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
		drv_cmd = BNX2X_Q_CMD_UPDATE;
		break;

	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP;
		break;

	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
		DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
		break;

	case (RAMROD_CMD_ID_ETH_HALT):
		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_HALT;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE):
		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_TERMINATE;
		break;

	case (RAMROD_CMD_ID_ETH_EMPTY):
		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
		drv_cmd = BNX2X_Q_CMD_EMPTY;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
			  command, fp->index);
		return;
	}

	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
		/* q_obj->complete_cmd() failure means that this was
		 * an unexpected completion.
		 *
		 * In this case we don't want to increase the bp->spq_left
		 * because apparently we haven't sent this command the first
		 * place.
		 */
#ifdef BNX2X_STOP_ON_ERROR
		bnx2x_panic();
#else
		return;
#endif

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in bp->spq_left and towards the memory */
	smp_mb__after_atomic_inc();

	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));

	if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
	    (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
		/* if Q update ramrod is completed for last Q in AFEX vif set
		 * flow, then ACK MCP at the end
		 *
		 * mark pending ACK to MCP bit.
		 * prevent case that both bits are cleared.
		 * At the end of load/unload driver checks that
		 * sp_state is cleared, and this order prevents
		 * races
		 */
		smp_mb__before_clear_bit();
		set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
		wmb();
		clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
		smp_mb__after_clear_bit();

		/* schedule workqueue to send ack to MCP */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
	}

	return;
}
void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
{
	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;

	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
				 start);
}
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;
	u8 cos;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
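		/* Illustrative bit layout (derived from the mask above):
		 * status bit 0 is the slowpath indication and fastpath
		 * queue i owns bit (1 + i + CNIC_SUPPORT(bp)); e.g. with
		 * CNIC compiled in, fp index 0 tests mask 0x2 << 1 == 0x4.
		 */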
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			for_each_cos_in_tx_queue(fp, cos)
				prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

	if (CNIC_SUPPORT(bp)) {
		mask = 0x2;
		if (status & (mask | 0x1)) {
			struct cnic_ops *c_ops = NULL;

			rcu_read_lock();
			if (likely(bp->state == BNX2X_STATE_OPEN)) {
				c_ops = rcu_dereference(bp->cnic_ops);
				if (c_ops)
					c_ops->cnic_handler(bp->cnic_data,
							    NULL);
			}
			rcu_read_unlock();

			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
/*
 * General service functions
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
			  lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	BNX2X_ERR("Timeout\n");
	return -EAGAIN;
}
int bnx2x_release_leader_lock(struct bnx2x *bp)
{
	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
}
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
			  resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
			  lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
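	/* Worked example (assuming the usual MISC_REGISTERS_GPIO_PORT_SHIFT
	 * of 4): GPIO 2 on a swapped port gives gpio_shift = 2 + 4 = 6 and
	 * gpio_mask = 1 << 6 = 0x40.
	 */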
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK,
		   "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
{
	u32 gpio_reg = 0;
	int rc = 0;

	/* Any port swapping should be handled by caller. */

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
		/* set CLR */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
		/* set SET */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
		/* set FLOAT */
		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		REG_WR(bp, MISC_REG_GPIO, gpio_reg);

	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return rc;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK,
		   "Clear GPIO INT %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK,
		   "Set GPIO INT %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
{
	u32 spio_reg;

	/* Only 2 SPIOs are configurable */
	if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
		BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);

	switch (mode) {
	case MISC_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_CLR_POS);
		break;

	case MISC_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
		spio_reg |= (spio << MISC_SPIO_SET_POS);
		break;

	case MISC_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
		/* set FLOAT */
		spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/*
		 * Initialize link parameters structure variables
		 * It is recommended to turn off RX FC for jumbo frames
		 * for better performance
		 */
		if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_XGXS;
			/* do PHY loopback at 10G speed, if possible */
			if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
				if (lp->speed_cap_mask[cfx_idx] &
				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
					lp->req_line_speed[cfx_idx] =
					SPEED_10000;
				else
					lp->req_line_speed[cfx_idx] =
					SPEED_1000;
			}
		}

		if (load_mode == LOAD_LOOPBACK_EXT) {
			struct link_params *lp = &bp->link_params;
			lp->loopback_mode = LOOPBACK_EXT;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
void bnx2x_force_link_reset(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
	bnx2x_release_phy_lock(bp);
}
2191 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2195 if (!BP_NOMCP(bp)) {
2196 bnx2x_acquire_phy_lock(bp);
2197 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2199 bnx2x_release_phy_lock(bp);
2201 BNX2X_ERR("Bootcode is missing - can not test link\n");
2207 /* Calculates the sum of vn_min_rates.
2208 It's needed for further normalizing of the min_rates.
2210 sum of vn_min_rates.
2212 0 - if all the min_rates are 0.
2213 In the latter case the fairness algorithm should be deactivated.
2214 If not all min_rates are zero then those that are zeroes will be set to 1.
2216 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2217 struct cmng_init_input *input)
2222 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2223 u32 vn_cfg = bp->mf_config[vn];
2224 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2225 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
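/* Note (illustrative, assuming MIN_BW uses the same 100 Mbps encoding as
 * MAX_BW below): a configured MIN_BW value of 10 becomes 10 * 100 = 1000,
 * i.e. 1000 Mbps, after this conversion.
 */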
2227 /* Skip hidden vns */
2228 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2230 /* If min rate is zero - set it to 1 */
2231 else if (!vn_min_rate)
2232 vn_min_rate = DEF_MIN_RATE;
2236 input->vnic_min_rate[vn] = vn_min_rate;
2239 /* if ETS is enabled or all min rates are zero - disable fairness */
2240 if (BNX2X_IS_ETS_ENABLED(bp)) {
2241 input->flags.cmng_enables &=
2242 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2243 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2244 } else if (all_zero) {
2245 input->flags.cmng_enables &=
2246 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2248 "All MIN values are zeroes fairness will be disabled\n");
2250 input->flags.cmng_enables |=
2251 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2254 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2255 struct cmng_init_input *input)
2258 u32 vn_cfg = bp->mf_config[vn];
2260 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2263 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2266 /* maxCfg is in percent of the link speed */
2267 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2268 } else /* SD modes */
2269 /* maxCfg is absolute in 100Mb units */
2270 vn_max_rate = maxCfg * 100;
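/* Illustrative example: in SD mode a maxCfg of 25 yields
 * vn_max_rate = 25 * 100 = 2500 Mbps.
 */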
2273 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2275 input->vnic_max_rate[vn] = vn_max_rate;
2279 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2281 if (CHIP_REV_IS_SLOW(bp))
2282 return CMNG_FNS_NONE;
2284 return CMNG_FNS_MINMAX;
2286 return CMNG_FNS_NONE;
2289 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2291 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2294 return; /* what should be the default value in this case? */
2296 /* For a 2 port configuration the absolute function number formula is
2298 * abs_func = 2 * vn + BP_PORT + BP_PATH
2300 * and there are 4 functions per port
2302 * For 4 port configuration it is
2303 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2305 * and there are 2 functions per port
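/* Worked example (illustrative): in 2 port mode (n == 1), vn == 1 on
 * port 0, path 0 gives abs_func = 1 * (2 * 1 + 0) + 0 = 2; in 4 port
 * mode (n == 2) the same vn/port/path gives 2 * (2 * 1 + 0) + 0 = 4.
 */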
2307 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2308 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2310 if (func >= E1H_FUNC_MAX)
2314 MF_CFG_RD(bp, func_mf_config[func].config);
2316 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2317 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2318 bp->flags |= MF_FUNC_DIS;
2320 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2321 bp->flags &= ~MF_FUNC_DIS;
2325 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2327 struct cmng_init_input input;
2328 memset(&input, 0, sizeof(struct cmng_init_input));
2330 input.port_rate = bp->link_vars.line_speed;
2332 if (cmng_type == CMNG_FNS_MINMAX) {
2335 /* read mf conf from shmem */
2337 bnx2x_read_mf_cfg(bp);
2339 /* calculate vn_weight_sum and enable fairness if it is not 0 */
2340 bnx2x_calc_vn_min(bp, &input);
2342 /* calculate and set min-max rate for each vn */
2344 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2345 bnx2x_calc_vn_max(bp, vn, &input);
2347 /* always enable rate shaping and fairness */
2348 input.flags.cmng_enables |=
2349 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2351 bnx2x_init_cmng(&input, &bp->cmng);
2355 /* rate shaping and fairness are disabled */
2357 "rate shaping and fairness are disabled\n");
2360 static void storm_memset_cmng(struct bnx2x *bp,
2361 struct cmng_init *cmng,
2365 size_t size = sizeof(struct cmng_struct_per_port);
2367 u32 addr = BAR_XSTRORM_INTMEM +
2368 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2370 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2372 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2373 int func = func_by_vn(bp, vn);
2375 addr = BAR_XSTRORM_INTMEM +
2376 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2377 size = sizeof(struct rate_shaping_vars_per_vn);
2378 __storm_memset_struct(bp, addr, size,
2379 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2381 addr = BAR_XSTRORM_INTMEM +
2382 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2383 size = sizeof(struct fairness_vars_per_vn);
2384 __storm_memset_struct(bp, addr, size,
2385 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2389 /* This function is called upon link interrupt */
2390 static void bnx2x_link_attn(struct bnx2x *bp)
2392 /* Make sure that we are synced with the current statistics */
2393 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2395 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2397 if (bp->link_vars.link_up) {
2399 /* dropless flow control */
2400 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2401 int port = BP_PORT(bp);
2402 u32 pause_enabled = 0;
2404 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2407 REG_WR(bp, BAR_USTRORM_INTMEM +
2408 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2412 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2413 struct host_port_stats *pstats;
2415 pstats = bnx2x_sp(bp, port_stats);
2416 /* reset old mac stats */
2417 memset(&(pstats->mac_stx[0]), 0,
2418 sizeof(struct mac_stx));
2420 if (bp->state == BNX2X_STATE_OPEN)
2421 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2424 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2425 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2427 if (cmng_fns != CMNG_FNS_NONE) {
2428 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2429 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2431 /* rate shaping and fairness are disabled */
2433 "single function mode without fairness\n");
2436 __bnx2x_link_report(bp);
2439 bnx2x_link_sync_notify(bp);
2442 void bnx2x__link_status_update(struct bnx2x *bp)
2444 if (bp->state != BNX2X_STATE_OPEN)
2447 /* read updated dcb configuration */
2448 bnx2x_dcbx_pmf_update(bp);
2450 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2452 if (bp->link_vars.link_up)
2453 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2455 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2457 /* indicate link status */
2458 bnx2x_link_report(bp);
2461 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2462 u16 vlan_val, u8 allowed_prio)
2464 struct bnx2x_func_state_params func_params = {0};
2465 struct bnx2x_func_afex_update_params *f_update_params =
2466 &func_params.params.afex_update;
2468 func_params.f_obj = &bp->func_obj;
2469 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2471 /* no need to wait for RAMROD completion, so don't
2472 * set RAMROD_COMP_WAIT flag
2475 f_update_params->vif_id = vifid;
2476 f_update_params->afex_default_vlan = vlan_val;
2477 f_update_params->allowed_priorities = allowed_prio;
2479 /* if the ramrod can not be sent, respond to the MCP immediately */
2480 if (bnx2x_func_state_change(bp, &func_params) < 0)
2481 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2486 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2487 u16 vif_index, u8 func_bit_map)
2489 struct bnx2x_func_state_params func_params = {0};
2490 struct bnx2x_func_afex_viflists_params *update_params =
2491 &func_params.params.afex_viflists;
2495 /* validate only LIST_SET and LIST_GET are received from switch */
2496 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2497 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2500 func_params.f_obj = &bp->func_obj;
2501 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2503 /* set parameters according to cmd_type */
2504 update_params->afex_vif_list_command = cmd_type;
2505 update_params->vif_list_index = cpu_to_le16(vif_index);
2506 update_params->func_bit_map =
2507 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2508 update_params->func_to_clear = 0;
2510 (cmd_type == VIF_LIST_RULE_GET) ?
2511 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2512 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2514 /* if ramrod can not be sent, respond to MCP immediately for
2515 * SET and GET requests (others are not triggered from the MCP)
2517 rc = bnx2x_func_state_change(bp, &func_params);
2519 bnx2x_fw_command(bp, drv_msg_code, 0);
2524 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2526 struct afex_stats afex_stats;
2527 u32 func = BP_ABS_FUNC(bp);
2534 u32 addr_to_write, vifid, addrs, stats_type, i;
2536 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2537 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2539 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2540 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2543 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2544 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2545 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2547 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2549 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2553 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2554 addr_to_write = SHMEM2_RD(bp,
2555 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2556 stats_type = SHMEM2_RD(bp,
2557 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2560 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2563 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2565 /* write response to scratchpad, for MCP */
2566 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2567 REG_WR(bp, addr_to_write + i*sizeof(u32),
2568 *(((u32 *)(&afex_stats))+i));
2570 /* send ack message to MCP */
2571 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2574 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2575 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2576 bp->mf_config[BP_VN(bp)] = mf_config;
2578 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2581 /* if VIF_SET is "enabled" */
2582 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2583 /* set rate limit directly to internal RAM */
2584 struct cmng_init_input cmng_input;
2585 struct rate_shaping_vars_per_vn m_rs_vn;
2586 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2587 u32 addr = BAR_XSTRORM_INTMEM +
2588 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2590 bp->mf_config[BP_VN(bp)] = mf_config;
2592 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2593 m_rs_vn.vn_counter.rate =
2594 cmng_input.vnic_max_rate[BP_VN(bp)];
2595 m_rs_vn.vn_counter.quota =
2596 (m_rs_vn.vn_counter.rate *
2597 RS_PERIODIC_TIMEOUT_USEC) / 8;
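/* Unit check (illustrative, assuming rate is in Mbps and the period
 * in usec): Mbit/s * us = bits transferred per period, and dividing
 * by 8 turns that into a byte quota per rate-shaping period.
 */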
2599 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2601 /* read relevant values from mf_cfg struct in shmem */
2603 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2604 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2605 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2607 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2608 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2609 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2610 vlan_prio = (mf_config &
2611 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2612 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2613 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2616 func_mf_config[func].afex_config) &
2617 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2618 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2621 func_mf_config[func].afex_config) &
2622 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2623 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2625 /* send ramrod to FW, return in case of failure */
2626 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2630 bp->afex_def_vlan_tag = vlan_val;
2631 bp->afex_vlan_mode = vlan_mode;
2633 /* notify link down because the function is marked disabled in bp->flags */
2634 bnx2x_link_report(bp);
2636 /* send INVALID VIF ramrod to FW */
2637 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2639 /* Reset the default afex VLAN */
2640 bp->afex_def_vlan_tag = -1;
2645 static void bnx2x_pmf_update(struct bnx2x *bp)
2647 int port = BP_PORT(bp);
2651 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2654 * We need the mb() to ensure the ordering between the writing to
2655 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2659 /* queue a periodic task */
2660 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2662 bnx2x_dcbx_pmf_update(bp);
2664 /* enable nig attention */
2665 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
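/* Illustrative: for VN 1 this evaluates to 0xff0f | (1 << 5) = 0xff2f,
 * i.e. the per-VN bit (bit VN + 4) is OR'ed into the fixed 0xff0f mask.
 */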
2666 if (bp->common.int_block == INT_BLOCK_HC) {
2667 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2668 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2669 } else if (!CHIP_IS_E1x(bp)) {
2670 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2671 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2674 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2682 * General service functions
2685 /* send the MCP a request, block until there is a reply */
2686 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2688 int mb_idx = BP_FW_MB_IDX(bp);
2692 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2694 mutex_lock(&bp->fw_mb_mutex);
2696 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2697 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2699 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2700 (command | seq), param);
2703 /* let the FW do its magic ... */
2706 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2708 /* Give the FW up to 5 seconds (500 * 10ms) */
2709 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2711 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2712 cnt*delay, rc, seq);
2714 /* is this a reply to our command? */
2715 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2716 rc &= FW_MSG_CODE_MASK;
2719 BNX2X_ERR("FW failed to respond!\n");
2723 mutex_unlock(&bp->fw_mb_mutex);
2729 static void storm_memset_func_cfg(struct bnx2x *bp,
2730 struct tstorm_eth_function_common_config *tcfg,
2733 size_t size = sizeof(struct tstorm_eth_function_common_config);
2735 u32 addr = BAR_TSTRORM_INTMEM +
2736 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2738 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2741 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2743 if (CHIP_IS_E1x(bp)) {
2744 struct tstorm_eth_function_common_config tcfg = {0};
2746 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2749 /* Enable the function in the FW */
2750 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2751 storm_memset_func_en(bp, p->func_id, 1);
2754 if (p->func_flgs & FUNC_FLG_SPQ) {
2755 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2756 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2757 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2762 * bnx2x_get_common_flags - Return common flags
2766 * @zero_stats: TRUE if statistics zeroing is needed
2768 * Return the flags that are common to both the Tx-only and the regular connections.
2770 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2771 struct bnx2x_fastpath *fp,
2774 unsigned long flags = 0;
2776 /* PF driver will always initialize the Queue to an ACTIVE state */
2777 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2779 /* tx only connections collect statistics (on the same index as the
2780 * parent connection). The statistics are zeroed when the parent
2781 * connection is initialized.
2784 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2786 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2792 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2793 struct bnx2x_fastpath *fp,
2796 unsigned long flags = 0;
2798 /* calculate other queue flags */
2800 __set_bit(BNX2X_Q_FLG_OV, &flags);
2802 if (IS_FCOE_FP(fp)) {
2803 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2804 /* For FCoE - force usage of default priority (for afex) */
2805 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2808 if (!fp->disable_tpa) {
2809 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2810 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
2811 if (fp->mode == TPA_MODE_GRO)
2812 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
2816 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
2817 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
2820 /* Always set HW VLAN stripping */
2821 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2823 /* configure silent vlan removal */
2825 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
2828 return flags | bnx2x_get_common_flags(bp, fp, true);
2831 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
2832 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
2835 gen_init->stat_id = bnx2x_stats_id(fp);
2836 gen_init->spcl_id = fp->cl_id;
2838 /* Always use mini-jumbo MTU for FCoE L2 ring */
2840 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2842 gen_init->mtu = bp->dev->mtu;
2844 gen_init->cos = cos;
2847 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2848 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2849 struct bnx2x_rxq_setup_params *rxq_init)
2853 u16 tpa_agg_size = 0;
2855 if (!fp->disable_tpa) {
2856 pause->sge_th_lo = SGE_TH_LO(bp);
2857 pause->sge_th_hi = SGE_TH_HI(bp);
2859 /* validate that the SGE ring has enough entries to cross the high threshold */
2860 WARN_ON(bp->dropless_fc &&
2861 pause->sge_th_hi + FW_PREFETCH_CNT >
2862 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
2864 tpa_agg_size = min_t(u32,
2865 (min_t(u32, 8, MAX_SKB_FRAGS) *
2866 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2867 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2869 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2870 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
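/* Illustrative reading of the mask math above: round max_sge up to a
 * multiple of PAGES_PER_SGE, then divide by PAGES_PER_SGE.
 */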
2871 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2875 /* pause - not for e1 */
2876 if (!CHIP_IS_E1(bp)) {
2877 pause->bd_th_lo = BD_TH_LO(bp);
2878 pause->bd_th_hi = BD_TH_HI(bp);
2880 pause->rcq_th_lo = RCQ_TH_LO(bp);
2881 pause->rcq_th_hi = RCQ_TH_HI(bp);
2883 * validate that the rings have enough entries to cross the high thresholds
2886 WARN_ON(bp->dropless_fc &&
2887 pause->bd_th_hi + FW_PREFETCH_CNT >
2889 WARN_ON(bp->dropless_fc &&
2890 pause->rcq_th_hi + FW_PREFETCH_CNT >
2891 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
2897 rxq_init->dscr_map = fp->rx_desc_mapping;
2898 rxq_init->sge_map = fp->rx_sge_mapping;
2899 rxq_init->rcq_map = fp->rx_comp_mapping;
2900 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2902 /* This should be the maximum number of data bytes that may be
2903 * placed on the BD (not including padding).
2905 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
2906 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
2908 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2909 rxq_init->tpa_agg_sz = tpa_agg_size;
2910 rxq_init->sge_buf_sz = sge_sz;
2911 rxq_init->max_sges_pkt = max_sge;
2912 rxq_init->rss_engine_id = BP_FUNC(bp);
2913 rxq_init->mcast_engine_id = BP_FUNC(bp);
2915 /* Maximum number of simultaneous TPA aggregations for this Queue.
2917 * For PF Clients it should be the maximum available number.
2918 * VF driver(s) may want to define it to a smaller value.
2920 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
2922 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2923 rxq_init->fw_sb_id = fp->fw_sb_id;
2926 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2928 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2929 /* configure silent vlan removal
2930 * if multi function mode is afex, then mask default vlan
2932 if (IS_MF_AFEX(bp)) {
2933 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
2934 rxq_init->silent_removal_mask = VLAN_VID_MASK;
2938 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
2939 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
2942 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
2943 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
2944 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2945 txq_init->fw_sb_id = fp->fw_sb_id;
2948 * set the tss leading client id for TX classification ==
2949 * leading RSS client id
2951 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
2953 if (IS_FCOE_FP(fp)) {
2954 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2955 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2959 static void bnx2x_pf_init(struct bnx2x *bp)
2961 struct bnx2x_func_init_params func_init = {0};
2962 struct event_ring_data eq_data = { {0} };
2965 if (!CHIP_IS_E1x(bp)) {
2966 /* reset IGU PF statistics: MSIX + ATTN */
2968 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2969 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2970 (CHIP_MODE_IS_4_PORT(bp) ?
2971 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2973 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2974 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2975 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2976 (CHIP_MODE_IS_4_PORT(bp) ?
2977 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2980 /* function setup flags */
2981 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2983 /* This flag is relevant for E1x only.
2984 * E2 doesn't have a TPA configuration at the function level.
2986 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2988 func_init.func_flgs = flags;
2989 func_init.pf_id = BP_FUNC(bp);
2990 func_init.func_id = BP_FUNC(bp);
2991 func_init.spq_map = bp->spq_mapping;
2992 func_init.spq_prod = bp->spq_prod_idx;
2994 bnx2x_func_init(bp, &func_init);
2996 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2999 * Congestion management values depend on the link rate.
3000 * There is no active link so the initial link rate is set to 10 Gbps.
3001 * When the link comes up, the congestion management values are
3002 * re-calculated according to the actual link rate.
3004 bp->link_vars.line_speed = SPEED_10000;
3005 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3007 /* Only the PMF sets the HW */
3009 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3011 /* init Event Queue */
3012 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3013 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3014 eq_data.producer = bp->eq_prod;
3015 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3016 eq_data.sb_id = DEF_SB_ID;
3017 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3021 static void bnx2x_e1h_disable(struct bnx2x *bp)
3023 int port = BP_PORT(bp);
3025 bnx2x_tx_disable(bp);
3027 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3030 static void bnx2x_e1h_enable(struct bnx2x *bp)
3032 int port = BP_PORT(bp);
3034 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3036 /* Tx queues should only be re-enabled */
3037 netif_tx_wake_all_queues(bp->dev);
3040 * Should not call netif_carrier_on since it will be called by the
3041 * link state check if the link is up
3045 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3047 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3049 struct eth_stats_info *ether_stat =
3050 &bp->slowpath->drv_info_to_mcp.ether_stat;
3052 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3053 ETH_STAT_INFO_VERSION_LEN);
3055 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3056 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3057 ether_stat->mac_local);
3059 ether_stat->mtu_size = bp->dev->mtu;
3061 if (bp->dev->features & NETIF_F_RXCSUM)
3062 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3063 if (bp->dev->features & NETIF_F_TSO)
3064 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3065 ether_stat->feature_flags |= bp->common.boot_mode;
3067 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3069 ether_stat->txq_size = bp->tx_ring_size;
3070 ether_stat->rxq_size = bp->rx_ring_size;
3073 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3075 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3076 struct fcoe_stats_info *fcoe_stat =
3077 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3079 if (!CNIC_LOADED(bp))
3082 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT,
3083 bp->fip_mac, ETH_ALEN);
3085 fcoe_stat->qos_priority =
3086 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3088 /* insert FCoE stats from ramrod response */
3090 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3091 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3092 tstorm_queue_statistics;
3094 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3095 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3096 xstorm_queue_statistics;
3098 struct fcoe_statistics_params *fw_fcoe_stat =
3099 &bp->fw_stats_data->fcoe;
3101 ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
3102 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3104 ADD_64(fcoe_stat->rx_bytes_hi,
3105 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3106 fcoe_stat->rx_bytes_lo,
3107 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3109 ADD_64(fcoe_stat->rx_bytes_hi,
3110 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3111 fcoe_stat->rx_bytes_lo,
3112 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3114 ADD_64(fcoe_stat->rx_bytes_hi,
3115 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3116 fcoe_stat->rx_bytes_lo,
3117 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3119 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3120 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3122 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3123 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3125 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3126 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3128 ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
3129 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3131 ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
3132 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3134 ADD_64(fcoe_stat->tx_bytes_hi,
3135 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3136 fcoe_stat->tx_bytes_lo,
3137 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3139 ADD_64(fcoe_stat->tx_bytes_hi,
3140 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3141 fcoe_stat->tx_bytes_lo,
3142 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3144 ADD_64(fcoe_stat->tx_bytes_hi,
3145 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3146 fcoe_stat->tx_bytes_lo,
3147 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3149 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3150 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3152 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3153 fcoe_q_xstorm_stats->ucast_pkts_sent);
3155 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3156 fcoe_q_xstorm_stats->bcast_pkts_sent);
3158 ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
3159 fcoe_q_xstorm_stats->mcast_pkts_sent);
3162 /* ask L5 driver to add data to the struct */
3163 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3166 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3168 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3169 struct iscsi_stats_info *iscsi_stat =
3170 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3172 if (!CNIC_LOADED(bp))
3175 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT,
3176 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
3178 iscsi_stat->qos_priority =
3179 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3181 /* ask L5 driver to add data to the struct */
3182 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3185 /* called due to MCP event (on pmf):
3186 * reread new bandwidth configuration
3188 * notify other functions about the change
3190 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3192 if (bp->link_vars.link_up) {
3193 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3194 bnx2x_link_sync_notify(bp);
3196 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3199 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3201 bnx2x_config_mf_bw(bp);
3202 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3205 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3207 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3208 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3211 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3213 enum drv_info_opcode op_code;
3214 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3216 /* if drv_info version supported by MFW doesn't match - send NACK */
3217 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3218 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3222 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3223 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3225 memset(&bp->slowpath->drv_info_to_mcp, 0,
3226 sizeof(union drv_info_to_mcp));
3229 case ETH_STATS_OPCODE:
3230 bnx2x_drv_info_ether_stat(bp);
3232 case FCOE_STATS_OPCODE:
3233 bnx2x_drv_info_fcoe_stat(bp);
3235 case ISCSI_STATS_OPCODE:
3236 bnx2x_drv_info_iscsi_stat(bp);
3239 /* if op code isn't supported - send NACK */
3240 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3244 /* if we got a drv_info attn from the MFW then these fields are defined in shmem2 */
3247 SHMEM2_WR(bp, drv_info_host_addr_lo,
3248 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3249 SHMEM2_WR(bp, drv_info_host_addr_hi,
3250 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3252 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3255 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3257 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3259 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3262 * This is the only place besides the function initialization
3263 * where bp->flags can change, so it is done without any locks
3266 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3267 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3268 bp->flags |= MF_FUNC_DIS;
3270 bnx2x_e1h_disable(bp);
3272 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3273 bp->flags &= ~MF_FUNC_DIS;
3275 bnx2x_e1h_enable(bp);
3277 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3279 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3280 bnx2x_config_mf_bw(bp);
3281 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3284 /* Report results to MCP */
3286 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3288 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3291 /* must be called under the spq lock */
3292 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3294 struct eth_spe *next_spe = bp->spq_prod_bd;
3296 if (bp->spq_prod_bd == bp->spq_last_bd) {
3297 bp->spq_prod_bd = bp->spq;
3298 bp->spq_prod_idx = 0;
3299 DP(BNX2X_MSG_SP, "end of spq\n");
3307 /* must be called under the spq lock */
3308 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3310 int func = BP_FUNC(bp);
3313 * Make sure that BD data is updated before writing the producer:
3314 * BD data is written to the memory, the producer is read from the
3315 * memory, thus we need a full memory barrier to ensure the ordering.
3319 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3325 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3327 * @cmd: command to check
3328 * @cmd_type: command type
3330 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3332 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3333 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3334 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3335 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3336 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3337 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3338 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3347 * bnx2x_sp_post - place a single command on an SP ring
3349 * @bp: driver handle
3350 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
3351 * @cid: SW CID the command is related to
3352 * @data_hi: command private data address (high 32 bits)
3353 * @data_lo: command private data address (low 32 bits)
3354 * @cmd_type: command type (e.g. NONE, ETH)
3356 * SP data is handled as if it's always an address pair, thus data fields are
3357 * not swapped to little endian in upper functions. Instead this function swaps
3358 * data as if it's two u32 fields.
3360 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3361 u32 data_hi, u32 data_lo, int cmd_type)
3363 struct eth_spe *spe;
3365 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3367 #ifdef BNX2X_STOP_ON_ERROR
3368 if (unlikely(bp->panic)) {
3369 BNX2X_ERR("Can't post SP when there is panic\n");
3374 spin_lock_bh(&bp->spq_lock);
3377 if (!atomic_read(&bp->eq_spq_left)) {
3378 BNX2X_ERR("BUG! EQ ring full!\n");
3379 spin_unlock_bh(&bp->spq_lock);
3383 } else if (!atomic_read(&bp->cq_spq_left)) {
3384 BNX2X_ERR("BUG! SPQ ring full!\n");
3385 spin_unlock_bh(&bp->spq_lock);
3390 spe = bnx2x_sp_get_next(bp);
3392 /* CID needs the port number to be encoded in it */
3393 spe->hdr.conn_and_cmd_data =
3394 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3397 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3399 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3400 SPE_HDR_FUNCTION_ID);
3402 spe->hdr.type = cpu_to_le16(type);
3404 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3405 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3408 * It's ok if the actual decrement is issued towards the memory
3409 * somewhere between the spin_lock and spin_unlock. Thus no
3410 * more explicit memory barrier is needed.
3413 atomic_dec(&bp->eq_spq_left);
3415 atomic_dec(&bp->cq_spq_left);
3419 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3420 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3421 (u32)(U64_LO(bp->spq_mapping) +
3422 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3423 HW_CID(bp, cid), data_hi, data_lo, type,
3424 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3426 bnx2x_sp_prod_update(bp);
3427 spin_unlock_bh(&bp->spq_lock);
3431 /* acquire split MCP access lock register */
3432 static int bnx2x_acquire_alr(struct bnx2x *bp)
3438 for (j = 0; j < 1000; j++) {
3440 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3441 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3442 if (val & (1L << 31))
3447 if (!(val & (1L << 31))) {
3448 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3455 /* release split MCP access lock register */
3456 static void bnx2x_release_alr(struct bnx2x *bp)
3458 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
3461 #define BNX2X_DEF_SB_ATT_IDX 0x0001
3462 #define BNX2X_DEF_SB_IDX 0x0002
3464 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3466 struct host_sp_status_block *def_sb = bp->def_status_blk;
3469 barrier(); /* status block is written to by the chip */
3470 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3471 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3472 rc |= BNX2X_DEF_SB_ATT_IDX;
3475 if (bp->def_idx != def_sb->sp_sb.running_index) {
3476 bp->def_idx = def_sb->sp_sb.running_index;
3477 rc |= BNX2X_DEF_SB_IDX;
3480 /* Do not reorder: reading the indices should complete before handling */
3486 * slow path service functions
3489 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3491 int port = BP_PORT(bp);
3492 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3493 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3494 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3495 NIG_REG_MASK_INTERRUPT_PORT0;
3500 if (bp->attn_state & asserted)
3501 BNX2X_ERR("IGU ERROR\n");
3503 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3504 aeu_mask = REG_RD(bp, aeu_addr);
3506 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3507 aeu_mask, asserted);
3508 aeu_mask &= ~(asserted & 0x3ff);
3509 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3511 REG_WR(bp, aeu_addr, aeu_mask);
3512 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3514 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3515 bp->attn_state |= asserted;
3516 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3518 if (asserted & ATTN_HARD_WIRED_MASK) {
3519 if (asserted & ATTN_NIG_FOR_FUNC) {
3521 bnx2x_acquire_phy_lock(bp);
3523 /* save nig interrupt mask */
3524 nig_mask = REG_RD(bp, nig_int_mask_addr);
3526 /* If nig_mask is not set, no need to call the update function */
3530 REG_WR(bp, nig_int_mask_addr, 0);
3532 bnx2x_link_attn(bp);
3535 /* handle unicore attn? */
3537 if (asserted & ATTN_SW_TIMER_4_FUNC)
3538 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3540 if (asserted & GPIO_2_FUNC)
3541 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3543 if (asserted & GPIO_3_FUNC)
3544 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3546 if (asserted & GPIO_4_FUNC)
3547 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3550 if (asserted & ATTN_GENERAL_ATTN_1) {
3551 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3552 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3554 if (asserted & ATTN_GENERAL_ATTN_2) {
3555 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3556 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3558 if (asserted & ATTN_GENERAL_ATTN_3) {
3559 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3560 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3563 if (asserted & ATTN_GENERAL_ATTN_4) {
3564 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3565 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3567 if (asserted & ATTN_GENERAL_ATTN_5) {
3568 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3569 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3571 if (asserted & ATTN_GENERAL_ATTN_6) {
3572 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3573 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3577 } /* if hardwired */
3579 if (bp->common.int_block == INT_BLOCK_HC)
3580 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3581 COMMAND_REG_ATTN_BITS_SET);
3583 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3585 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3586 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3587 REG_WR(bp, reg_addr, asserted);
3589 /* now set back the mask */
3590 if (asserted & ATTN_NIG_FOR_FUNC) {
3591 REG_WR(bp, nig_int_mask_addr, nig_mask);
3592 bnx2x_release_phy_lock(bp);
3596 static void bnx2x_fan_failure(struct bnx2x *bp)
3598 int port = BP_PORT(bp);
3600 /* mark the failure */
3603 dev_info.port_hw_config[port].external_phy_config);
3605 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3606 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3607 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3610 /* log the failure */
3611 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3612 "Please contact OEM Support for assistance\n");
3615 * Schedule device reset (unload).
3616 * This is because some boards consume enough power while the driver is
3617 * up to overheat if the fan fails.
3619 smp_mb__before_clear_bit();
3620 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3621 smp_mb__after_clear_bit();
3622 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3626 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3628 int port = BP_PORT(bp);
3632 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3633 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3635 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3637 val = REG_RD(bp, reg_offset);
3638 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3639 REG_WR(bp, reg_offset, val);
3641 BNX2X_ERR("SPIO5 hw attention\n");
3643 /* Fan failure attention */
3644 bnx2x_hw_reset_phy(&bp->link_params);
3645 bnx2x_fan_failure(bp);
3648 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3649 bnx2x_acquire_phy_lock(bp);
3650 bnx2x_handle_module_detect_int(&bp->link_params);
3651 bnx2x_release_phy_lock(bp);
3654 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3656 val = REG_RD(bp, reg_offset);
3657 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3658 REG_WR(bp, reg_offset, val);
3660 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3661 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3666 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3670 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3672 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3673 BNX2X_ERR("DB hw attention 0x%x\n", val);
3674 /* DORQ discard attention */
3676 BNX2X_ERR("FATAL error from DORQ\n");
3679 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3681 int port = BP_PORT(bp);
3684 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3685 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3687 val = REG_RD(bp, reg_offset);
3688 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3689 REG_WR(bp, reg_offset, val);
3691 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3692 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3697 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3701 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3703 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3704 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3705 /* CFC error attention */
3707 BNX2X_ERR("FATAL error from CFC\n");
3710 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3711 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3712 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3713 /* RQ_USDMDP_FIFO_OVERFLOW */
3715 BNX2X_ERR("FATAL error from PXP\n");
3717 if (!CHIP_IS_E1x(bp)) {
3718 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3719 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3723 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3725 int port = BP_PORT(bp);
3728 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3729 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3731 val = REG_RD(bp, reg_offset);
3732 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3733 REG_WR(bp, reg_offset, val);
3735 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3736 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3741 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3745 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3747 if (attn & BNX2X_PMF_LINK_ASSERT) {
3748 int func = BP_FUNC(bp);
3750 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3751 bnx2x_read_mf_cfg(bp);
3752 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3753 func_mf_config[BP_ABS_FUNC(bp)].config);
3755 func_mb[BP_FW_MB_IDX(bp)].drv_status);
3756 if (val & DRV_STATUS_DCC_EVENT_MASK)
3758 (val & DRV_STATUS_DCC_EVENT_MASK));
3760 if (val & DRV_STATUS_SET_MF_BW)
3761 bnx2x_set_mf_bw(bp);
3763 if (val & DRV_STATUS_DRV_INFO_REQ)
3764 bnx2x_handle_drv_info_req(bp);
3765 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3766 bnx2x_pmf_update(bp);
3769 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3770 bp->dcbx_enabled > 0)
3771 /* start dcbx state machine */
3772 bnx2x_dcbx_set_params(bp,
3773 BNX2X_DCBX_STATE_NEG_RECEIVED);
3774 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3775 bnx2x_handle_afex_cmd(bp,
3776 val & DRV_STATUS_AFEX_EVENT_MASK);
3777 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3778 bnx2x_handle_eee_event(bp);
3779 if (bp->link_vars.periodic_flags &
3780 PERIODIC_FLAGS_LINK_EVENT) {
3781 /* sync with link */
3782 bnx2x_acquire_phy_lock(bp);
3783 bp->link_vars.periodic_flags &=
3784 ~PERIODIC_FLAGS_LINK_EVENT;
3785 bnx2x_release_phy_lock(bp);
3787 bnx2x_link_sync_notify(bp);
3788 bnx2x_link_report(bp);
3790 /* Always call it here: bnx2x_link_report() will
3791 * prevent duplicate link indications.
3793 bnx2x__link_status_update(bp);
3794 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3796 BNX2X_ERR("MC assert!\n");
3797 bnx2x_mc_assert(bp);
3798 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3800 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3801 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3804 } else if (attn & BNX2X_MCP_ASSERT) {
3806 BNX2X_ERR("MCP assert!\n");
3807 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3811 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3814 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3815 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3816 if (attn & BNX2X_GRC_TIMEOUT) {
3817 val = CHIP_IS_E1(bp) ? 0 :
3818 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
3819 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3821 if (attn & BNX2X_GRC_RSV) {
3822 val = CHIP_IS_E1(bp) ? 0 :
3823 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
3824 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3826 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3832 * 0-7 - Engine0 load counter.
3833 * 8-15 - Engine1 load counter.
3834 * 16 - Engine0 RESET_IN_PROGRESS bit.
3835 * 17 - Engine1 RESET_IN_PROGRESS bit.
3836 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function on the engine.
3838 * 19 - Engine1 ONE_IS_LOADED.
3839 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engines'
3840 * leaders to complete (check both RESET_IN_PROGRESS bits, not just
3841 * the one belonging to its engine).
3844 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
3846 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
3847 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
3848 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
3849 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
3850 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
3851 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
3852 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
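/* Example (illustrative): with PF0 and PF2 loaded on engine 0 the
 * engine 0 load counter reads 0b101 = 0x05 (one bit per PF, see
 * bnx2x_set_pf_load() below), so the register's low byte is 0x05.
 */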
3855 * Set the GLOBAL_RESET bit.
3857 * Should be run under rtnl lock
3859 void bnx2x_set_reset_global(struct bnx2x *bp)
3862 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3863 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3864 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
3865 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3869 * Clear the GLOBAL_RESET bit.
3871 * Should be run under rtnl lock
3873 static void bnx2x_clear_reset_global(struct bnx2x *bp)
3876 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3877 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3878 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
3879 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3883 * Checks the GLOBAL_RESET bit.
3885 * should be run under rtnl lock
3887 static bool bnx2x_reset_is_global(struct bnx2x *bp)
3889 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3891 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3892 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
3896 * Clear RESET_IN_PROGRESS bit for the current engine.
3898 * Should be run under rtnl lock
3900 static void bnx2x_set_reset_done(struct bnx2x *bp)
3903 u32 bit = BP_PATH(bp) ?
3904 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3905 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3906 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3910 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3912 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3916 * Set RESET_IN_PROGRESS for the current engine.
3918 * should be run under rtnl lock
3920 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3923 u32 bit = BP_PATH(bp) ?
3924 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3925 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3926 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3930 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3931 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3935 * Checks the RESET_IN_PROGRESS bit for the given engine.
3936 * should be run under rtnl lock
3938 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
3940 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3942 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
3944 /* return false if bit is set */
3945 return (val & bit) ? false : true;
3949 * set pf load for the current pf.
3951 * should be run under rtnl lock
3953 void bnx2x_set_pf_load(struct bnx2x *bp)
3956 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3957 BNX2X_PATH0_LOAD_CNT_MASK;
3958 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3959 BNX2X_PATH0_LOAD_CNT_SHIFT;
3961 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3962 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3964 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
3966 /* get the current counter value */
3967 val1 = (val & mask) >> shift;
3969 /* set bit of that PF */
3970 val1 |= (1 << bp->pf_num);
3972 /* clear the old value */
3975 /* set the new one */
3976 val |= ((val1 << shift) & mask);
3978 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
3979 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3983 * bnx2x_clear_pf_load - clear pf load mark
3985 * @bp: driver handle
3987 * Should be run under rtnl lock.
3988 * Decrements the load counter for the current engine. Returns
3989 * whether other functions are still loaded
3991 bool bnx2x_clear_pf_load(struct bnx2x *bp)
3994 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
3995 BNX2X_PATH0_LOAD_CNT_MASK;
3996 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
3997 BNX2X_PATH0_LOAD_CNT_SHIFT;
3999 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4000 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4001 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4003 /* get the current counter value */
4004 val1 = (val & mask) >> shift;
4006 /* clear bit of that PF */
4007 val1 &= ~(1 << bp->pf_num);
4009 /* clear the old value */
4012 /* set the new one */
4013 val |= ((val1 << shift) & mask);
4015 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4016 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4021 * Read the load status for the current engine.
4023 * should be run under rtnl lock
4025 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4027 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4028 BNX2X_PATH0_LOAD_CNT_MASK);
4029 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4030 BNX2X_PATH0_LOAD_CNT_SHIFT);
4031 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4033 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4035 val = (val & mask) >> shift;
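/* Illustrative: a register value of 0x00000305 for engine 0 yields a
 * load mask of 0x05 here, i.e. PF0 and PF2 are loaded on that engine.
 */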
4037 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4043 static void _print_next_block(int idx, const char *blk)
4045 pr_cont("%s%s", idx ? ", " : "", blk);
4048 static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
4053 for (i = 0; sig; i++) {
4054 cur_bit = ((u32)0x1 << i);
4055 if (sig & cur_bit) {
4057 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4059 _print_next_block(par_num++, "BRB");
4061 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4063 _print_next_block(par_num++, "PARSER");
4065 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4067 _print_next_block(par_num++, "TSDM");
4069 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4071 _print_next_block(par_num++,
4074 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4076 _print_next_block(par_num++, "TCM");
4078 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4080 _print_next_block(par_num++, "TSEMI");
4082 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4084 _print_next_block(par_num++, "XPB");
4096 static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4097 bool *global, bool print)
4101 for (i = 0; sig; i++) {
4102 cur_bit = ((u32)0x1 << i);
4103 if (sig & cur_bit) {
4105 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4107 _print_next_block(par_num++, "PBF");
4109 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4111 _print_next_block(par_num++, "QM");
4113 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4115 _print_next_block(par_num++, "TM");
4117 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4119 _print_next_block(par_num++, "XSDM");
4121 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4123 _print_next_block(par_num++, "XCM");
4125 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4127 _print_next_block(par_num++, "XSEMI");
4129 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4131 _print_next_block(par_num++,
4134 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4136 _print_next_block(par_num++, "NIG");
4138 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4140 _print_next_block(par_num++,
4144 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4146 _print_next_block(par_num++, "DEBUG");
4148 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4150 _print_next_block(par_num++, "USDM");
4152 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4154 _print_next_block(par_num++, "UCM");
4156 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4158 _print_next_block(par_num++, "USEMI");
4160 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4162 _print_next_block(par_num++, "UPB");
4164 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4166 _print_next_block(par_num++, "CSDM");
4168 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4170 _print_next_block(par_num++, "CCM");
4182 static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4187 for (i = 0; sig; i++) {
4188 cur_bit = ((u32)0x1 << i);
4189 if (sig & cur_bit) {
4191 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4193 _print_next_block(par_num++, "CSEMI");
4195 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4197 _print_next_block(par_num++, "PXP");
4199 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4201 _print_next_block(par_num++,
4202 "PXPPCICLOCKCLIENT");
4204 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4206 _print_next_block(par_num++, "CFC");
4208 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4210 _print_next_block(par_num++, "CDU");
4212 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4214 _print_next_block(par_num++, "DMAE");
4216 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4218 _print_next_block(par_num++, "IGU");
4220 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4222 _print_next_block(par_num++, "MISC");
4234 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4235 bool *global, bool print)
4239 for (i = 0; sig; i++) {
4240 cur_bit = ((u32)0x1 << i);
4241 if (sig & cur_bit) {
4243 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4245 _print_next_block(par_num++, "MCP ROM");
4248 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4250 _print_next_block(par_num++,
4254 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4256 _print_next_block(par_num++,
4260 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4262 _print_next_block(par_num++,
4276 static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4281 for (i = 0; sig; i++) {
4282 cur_bit = ((u32)0x1 << i);
4283 if (sig & cur_bit) {
4285 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4287 _print_next_block(par_num++, "PGLUE_B");
4289 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4291 _print_next_block(par_num++, "ATC");
4303 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4306 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4307 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4308 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4309 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4310 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4312 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4313 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4314 sig[0] & HW_PRTY_ASSERT_SET_0,
4315 sig[1] & HW_PRTY_ASSERT_SET_1,
4316 sig[2] & HW_PRTY_ASSERT_SET_2,
4317 sig[3] & HW_PRTY_ASSERT_SET_3,
4318 sig[4] & HW_PRTY_ASSERT_SET_4);
4321 "Parity errors detected in blocks: ");
4322 par_num = bnx2x_check_blocks_with_parity0(
4323 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4324 par_num = bnx2x_check_blocks_with_parity1(
4325 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4326 par_num = bnx2x_check_blocks_with_parity2(
4327 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4328 par_num = bnx2x_check_blocks_with_parity3(
4329 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4330 par_num = bnx2x_check_blocks_with_parity4(
4331 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4342 * bnx2x_chk_parity_attn - checks for parity attentions.
4344 * @bp: driver handle
4345 * @global: true if there was a global attention
4346 * @print: show parity attention in syslog
4348 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4350 struct attn_route attn = { {0} };
4351 int port = BP_PORT(bp);
4353 attn.sig[0] = REG_RD(bp,
4354 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4356 attn.sig[1] = REG_RD(bp,
4357 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4359 attn.sig[2] = REG_RD(bp,
4360 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4362 attn.sig[3] = REG_RD(bp,
4363 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4366 if (!CHIP_IS_E1x(bp))
4367 attn.sig[4] = REG_RD(bp,
4368 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4371 return bnx2x_parity_attn(bp, global, print, attn.sig);
4375 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4378 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4380 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4381 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4382 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4383 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4384 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4386 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4387 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4388 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4389 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4391 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4394 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4395 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4396 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4397 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4398 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4399 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4400 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4401 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4403 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4404 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4405 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4406 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4407 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4408 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4409 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4410 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4411 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4412 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4413 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4414 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4415 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4416 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4417 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4420 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4421 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4422 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4423 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4424 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4429 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4431 struct attn_route attn, *group_mask;
4432 int port = BP_PORT(bp);
4437 bool global = false;
4439 /* need to take HW lock because MCP or other port might also
4440 try to handle this event */
4441 bnx2x_acquire_alr(bp);
4443 if (bnx2x_chk_parity_attn(bp, &global, true)) {
4444 #ifndef BNX2X_STOP_ON_ERROR
4445 bp->recovery_state = BNX2X_RECOVERY_INIT;
4446 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4447 /* Disable HW interrupts */
4448 bnx2x_int_disable(bp);
4449 /* In case of parity errors don't handle attentions so that
4450 * other functions would also "see" the parity errors.
4455 bnx2x_release_alr(bp);
4459 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4460 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4461 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4462 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4463 if (!CHIP_IS_E1x(bp))
4465 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4469 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4470 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4472 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4473 if (deasserted & (1 << index)) {
4474 group_mask = &bp->attn_group[index];
4476 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
4478 group_mask->sig[0], group_mask->sig[1],
4479 group_mask->sig[2], group_mask->sig[3],
4480 group_mask->sig[4]);
4482 bnx2x_attn_int_deasserted4(bp,
4483 attn.sig[4] & group_mask->sig[4]);
4484 bnx2x_attn_int_deasserted3(bp,
4485 attn.sig[3] & group_mask->sig[3]);
4486 bnx2x_attn_int_deasserted1(bp,
4487 attn.sig[1] & group_mask->sig[1]);
4488 bnx2x_attn_int_deasserted2(bp,
4489 attn.sig[2] & group_mask->sig[2]);
4490 bnx2x_attn_int_deasserted0(bp,
4491 attn.sig[0] & group_mask->sig[0]);
4495 bnx2x_release_alr(bp);
4497 if (bp->common.int_block == INT_BLOCK_HC)
4498 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4499 COMMAND_REG_ATTN_BITS_CLR);
4501 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4504 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4505 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4506 REG_WR(bp, reg_addr, val);
4508 if (~bp->attn_state & deasserted)
4509 BNX2X_ERR("IGU ERROR\n");
4511 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4512 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4514 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4515 aeu_mask = REG_RD(bp, reg_addr);
4517 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
4518 aeu_mask, deasserted);
4519 aeu_mask |= (deasserted & 0x3ff);
4520 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4522 REG_WR(bp, reg_addr, aeu_mask);
4523 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
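/* Example of the mask restore above (illustrative value): with
 * deasserted == 0x4, attention line 2 has just gone quiet, so bit 2 is
 * OR-ed back into the AEU mask (only the low 10 attention lines, 0x3ff,
 * participate) and that line may raise an attention again later.
 */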
4525 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4526 bp->attn_state &= ~deasserted;
4527 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4530 static void bnx2x_attn_int(struct bnx2x *bp)
4532 /* read local copy of bits */
4533 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4535 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4537 u32 attn_state = bp->attn_state;
4539 /* look for changed bits */
4540 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4541 u32 deasserted = ~attn_bits & attn_ack & attn_state;
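/* Worked example of the edge detection above (illustrative values):
 * attn_bits = 0101b (lines 0 and 2 raised in the status block),
 * attn_ack  = 0011b, attn_state = 0011b (lines 0 and 1 acked/tracked).
 * asserted   = 0101b & ~0011b & ~0011b = 0100b -> line 2 newly raised,
 * deasserted = ~0101b & 0011b & 0011b  = 0010b -> line 1 newly dropped.
 * Line 0 is unchanged (set everywhere) and line 3 is idle.
 */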
4544 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4545 attn_bits, attn_ack, asserted, deasserted);
4547 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4548 BNX2X_ERR("BAD attention state\n");
4550 /* handle bits that were raised */
4552 bnx2x_attn_int_asserted(bp, asserted);
4555 bnx2x_attn_int_deasserted(bp, deasserted);
4558 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4559 u16 index, u8 op, u8 update)
4561 u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4563 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4567 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4569 /* No memory barriers */
4570 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4571 mmiowb(); /* keep prod updates ordered */
4574 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4575 union event_ring_elem *elem)
4577 u8 err = elem->message.error;
4579 if (!bp->cnic_eth_dev.starting_cid ||
4580 (cid < bp->cnic_eth_dev.starting_cid &&
4581 cid != bp->cnic_eth_dev.iscsi_l2_cid))
4584 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4586 if (unlikely(err)) {
4588 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4590 bnx2x_panic_dump(bp);
4592 bnx2x_cnic_cfc_comp(bp, cid, err);
4596 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4598 struct bnx2x_mcast_ramrod_params rparam;
4601 memset(&rparam, 0, sizeof(rparam));
4603 rparam.mcast_obj = &bp->mcast_obj;
4605 netif_addr_lock_bh(bp->dev);
4607 /* Clear pending state for the last command */
4608 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4610 /* If there are pending mcast commands - send them */
4611 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4612 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4614 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4618 netif_addr_unlock_bh(bp->dev);
4621 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4622 union event_ring_elem *elem)
4624 unsigned long ramrod_flags = 0;
4626 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4627 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
4629 /* Always push next commands out, don't wait here */
4630 __set_bit(RAMROD_CONT, &ramrod_flags);
4632 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
4633 case BNX2X_FILTER_MAC_PENDING:
4634 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4635 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4636 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4638 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4641 case BNX2X_FILTER_MCAST_PENDING:
4642 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
4643 /* This is only relevant for 57710 where multicast MACs are
4644 * configured as unicast MACs using the same ramrod.
4646 bnx2x_handle_mcast_eqe(bp);
4649 BNX2X_ERR("Unsupported classification command: %d\n",
4650 elem->message.data.eth_event.echo);
4654 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4657 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4659 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4663 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4665 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4667 netif_addr_lock_bh(bp->dev);
4669 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
4671 /* Send rx_mode command again if it was requested */
4672 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4673 bnx2x_set_storm_rx_mode(bp);
4674 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4676 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4677 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4679 bnx2x_set_iscsi_eth_rx_mode(bp, false);
4681 netif_addr_unlock_bh(bp->dev);
4684 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
4685 union event_ring_elem *elem)
4687 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4689 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4690 elem->message.data.vif_list_event.func_bit_map);
4691 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4692 elem->message.data.vif_list_event.func_bit_map);
4693 } else if (elem->message.data.vif_list_event.echo ==
4694 VIF_LIST_RULE_SET) {
4695 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4696 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4700 /* called with rtnl_lock */
4701 static void bnx2x_after_function_update(struct bnx2x *bp)
4704 struct bnx2x_fastpath *fp;
4705 struct bnx2x_queue_state_params queue_params = {NULL};
4706 struct bnx2x_queue_update_params *q_update_params =
4707 &queue_params.params.update;
4709 /* Send Q update command with afex vlan removal values for all Qs */
4710 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
4712 /* set silent vlan removal values according to vlan mode */
4713 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4714 &q_update_params->update_flags);
4715 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4716 &q_update_params->update_flags);
4717 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4719 /* in access mode, mask and value are 0 to strip all vlans */
4720 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4721 q_update_params->silent_removal_value = 0;
4722 q_update_params->silent_removal_mask = 0;
4724 q_update_params->silent_removal_value =
4725 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4726 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4729 for_each_eth_queue(bp, q) {
4730 /* Set the appropriate Queue object */
4732 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4734 /* send the ramrod */
4735 rc = bnx2x_queue_state_change(bp, &queue_params);
4737 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4742 fp = &bp->fp[FCOE_IDX(bp)];
4743 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
4745 /* clear pending completion bit */
4746 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4748 /* mark latest Q bit */
4749 smp_mb__before_clear_bit();
4750 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4751 smp_mb__after_clear_bit();
4753 /* send Q update ramrod for FCoE Q */
4754 rc = bnx2x_queue_state_change(bp, &queue_params);
4756 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4759 /* If no FCoE ring - ACK MCP now */
4760 bnx2x_link_report(bp);
4761 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4765 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4766 struct bnx2x *bp, u32 cid)
4768 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
4770 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
4771 return &bnx2x_fcoe_sp_obj(bp, q_obj);
4773 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
4776 static void bnx2x_eq_int(struct bnx2x *bp)
4778 u16 hw_cons, sw_cons, sw_prod;
4779 union event_ring_elem *elem;
4784 struct bnx2x_queue_sp_obj *q_obj;
4785 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
4786 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
4788 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
4790 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
4791 * When we get the next-page we need to adjust so the loop
4792 * condition below will be met. The next element is the size of a
4793 * regular element and hence incrementing by 1
4795 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
4796 hw_cons++;
4798 /* This function may never run in parallel with itself for a
4799 * specific bp, thus there is no need for "paired" read memory barriers.
4802 sw_cons = bp->eq_cons;
4803 sw_prod = bp->eq_prod;
4805 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
4806 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
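/* Illustrative layout, assuming 256 elements per EQ page with the last
 * element of each page used as a next-page pointer: consumable indices
 * within a page are 0..254, so when the low bits of hw_cons equal
 * EQ_DESC_MAX_PAGE the counter is bumped past the next-page element
 * (the hw_cons++ above) before the sw_cons != hw_cons loop below runs.
 */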
4808 for (; sw_cons != hw_cons;
4809 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
4812 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
4814 cid = SW_CID(elem->message.data.cfc_del_event.cid);
4815 opcode = elem->message.opcode;
4818 /* handle eq element */
4820 case EVENT_RING_OPCODE_STAT_QUERY:
4821 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
4822 "got statistics comp event %d\n",
4824 /* nothing to do with stats comp */
4827 case EVENT_RING_OPCODE_CFC_DEL:
4828 /* handle according to cid range */
4830 * we may want to verify here that the bp state is
4834 "got delete ramrod for MULTI[%d]\n", cid);
4836 if (CNIC_LOADED(bp) &&
4837 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
4840 q_obj = bnx2x_cid_to_q_obj(bp, cid);
4842 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
4849 case EVENT_RING_OPCODE_STOP_TRAFFIC:
4850 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
4851 if (f_obj->complete_cmd(bp, f_obj,
4852 BNX2X_F_CMD_TX_STOP))
4854 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
4857 case EVENT_RING_OPCODE_START_TRAFFIC:
4858 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
4859 if (f_obj->complete_cmd(bp, f_obj,
4860 BNX2X_F_CMD_TX_START))
4862 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4865 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4866 echo = elem->message.data.function_update_event.echo;
4867 if (echo == SWITCH_UPDATE) {
4868 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4869 "got FUNC_SWITCH_UPDATE ramrod\n");
4870 if (f_obj->complete_cmd(
4871 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
4875 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4876 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4877 f_obj->complete_cmd(bp, f_obj,
4878 BNX2X_F_CMD_AFEX_UPDATE);
4880 /* We will perform the Queues update from
4881 * sp_rtnl task as all Queue SP operations
4882 * should run under rtnl_lock.
4884 smp_mb__before_clear_bit();
4885 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4886 &bp->sp_rtnl_state);
4887 smp_mb__after_clear_bit();
4889 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4894 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
4895 f_obj->complete_cmd(bp, f_obj,
4896 BNX2X_F_CMD_AFEX_VIFLISTS);
4897 bnx2x_after_afex_vif_lists(bp, elem);
4899 case EVENT_RING_OPCODE_FUNCTION_START:
4900 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4901 "got FUNC_START ramrod\n");
4902 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
4907 case EVENT_RING_OPCODE_FUNCTION_STOP:
4908 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4909 "got FUNC_STOP ramrod\n");
4910 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
4916 switch (opcode | bp->state) {
4917 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
4919 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
4920 BNX2X_STATE_OPENING_WAIT4_PORT):
4921 cid = elem->message.data.eth_event.echo &
4923 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
4925 rss_raw->clear_pending(rss_raw);
4928 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
4929 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
4930 case (EVENT_RING_OPCODE_SET_MAC |
4931 BNX2X_STATE_CLOSING_WAIT4_HALT):
4932 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4934 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4936 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
4937 BNX2X_STATE_CLOSING_WAIT4_HALT):
4938 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
4939 bnx2x_handle_classification_eqe(bp, elem);
4942 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4944 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4946 case (EVENT_RING_OPCODE_MULTICAST_RULES |
4947 BNX2X_STATE_CLOSING_WAIT4_HALT):
4948 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
4949 bnx2x_handle_mcast_eqe(bp);
4952 case (EVENT_RING_OPCODE_FILTERS_RULES |
4954 case (EVENT_RING_OPCODE_FILTERS_RULES |
4956 case (EVENT_RING_OPCODE_FILTERS_RULES |
4957 BNX2X_STATE_CLOSING_WAIT4_HALT):
4958 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
4959 bnx2x_handle_rx_mode_eqe(bp);
4962 /* unknown event: log an error and continue */
4963 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
4964 elem->message.opcode, bp->state);
4970 smp_mb__before_atomic_inc();
4971 atomic_add(spqe_cnt, &bp->eq_spq_left);
4973 bp->eq_cons = sw_cons;
4974 bp->eq_prod = sw_prod;
4975 /* Make sure that above mem writes were issued towards the memory */
4978 /* update producer */
4979 bnx2x_update_eq_prod(bp, bp->eq_prod);
4982 static void bnx2x_sp_task(struct work_struct *work)
4984 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
4987 status = bnx2x_update_dsb_idx(bp);
4988 /* if (status == 0) */
4989 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
4991 DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
4994 if (status & BNX2X_DEF_SB_ATT_IDX) {
4996 status &= ~BNX2X_DEF_SB_ATT_IDX;
4999 /* SP events: STAT_QUERY and others */
5000 if (status & BNX2X_DEF_SB_IDX) {
5001 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5003 if (FCOE_INIT(bp) &&
5004 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5006 * Prevent local bottom-halves from running as
5007 * we are going to change the local NAPI list.
5010 napi_schedule(&bnx2x_fcoe(bp, napi));
5014 /* Handle EQ completions */
5017 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5018 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5020 status &= ~BNX2X_DEF_SB_IDX;
5023 if (unlikely(status))
5024 DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
5027 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5028 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5030 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5031 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5033 bnx2x_link_report(bp);
5034 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5038 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5040 struct net_device *dev = dev_instance;
5041 struct bnx2x *bp = netdev_priv(dev);
5043 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5044 IGU_INT_DISABLE, 0);
5046 #ifdef BNX2X_STOP_ON_ERROR
5047 if (unlikely(bp->panic))
5051 if (CNIC_LOADED(bp)) {
5052 struct cnic_ops *c_ops;
5055 c_ops = rcu_dereference(bp->cnic_ops);
5057 c_ops->cnic_handler(bp->cnic_data, NULL);
5061 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
5066 /* end of slow path */
5069 void bnx2x_drv_pulse(struct bnx2x *bp)
5071 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5072 bp->fw_drv_pulse_wr_seq);
5076 static void bnx2x_timer(unsigned long data)
5078 struct bnx2x *bp = (struct bnx2x *) data;
5080 if (!netif_running(bp->dev))
5083 if (!BP_NOMCP(bp)) {
5084 int mb_idx = BP_FW_MB_IDX(bp);
5088 ++bp->fw_drv_pulse_wr_seq;
5089 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5090 /* TBD - add SYSTEM_TIME */
5091 drv_pulse = bp->fw_drv_pulse_wr_seq;
5092 bnx2x_drv_pulse(bp);
5094 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5095 MCP_PULSE_SEQ_MASK);
5096 /* The delta between driver pulse and mcp response
5097 * should be 1 (before mcp response) or 0 (after mcp response)
5099 if ((drv_pulse != mcp_pulse) &&
5100 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5101 /* someone lost a heartbeat... */
5102 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5103 drv_pulse, mcp_pulse);
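/* Example of the accepted pulse window: if drv_pulse is 0x21, the MCP
 * is considered alive when mcp_pulse reads 0x21 (MCP already answered)
 * or 0x20 (our pulse was written but not yet answered); any other value
 * trips the heartbeat error above. The "+ 1" comparison is masked with
 * MCP_PULSE_SEQ_MASK so the sequence may wrap.
 */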
5107 if (bp->state == BNX2X_STATE_OPEN)
5108 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5110 mod_timer(&bp->timer, jiffies + bp->current_interval);
5113 /* end of Statistics */
5118 * nic init service functions
5121 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5124 if (!(len%4) && !(addr%4))
5125 for (i = 0; i < len; i += 4)
5126 REG_WR(bp, addr + i, fill);
5128 for (i = 0; i < len; i++)
5129 REG_WR8(bp, addr + i, fill);
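/* Example: bnx2x_fill(bp, addr, 0, 8) with a 4-byte-aligned addr issues
 * two 32-bit REG_WR()s (offsets 0 and 4); an unaligned addr or a length
 * that is not a multiple of 4 falls back to byte-wide REG_WR8() writes.
 */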
5133 /* helper: writes FP SP data to FW - data_size in dwords */
5134 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5140 for (index = 0; index < data_size; index++)
5141 REG_WR(bp, BAR_CSTRORM_INTMEM +
5142 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5144 *(sb_data_p + index));
5147 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5151 struct hc_status_block_data_e2 sb_data_e2;
5152 struct hc_status_block_data_e1x sb_data_e1x;
5154 /* disable the function first */
5155 if (!CHIP_IS_E1x(bp)) {
5156 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5157 sb_data_e2.common.state = SB_DISABLED;
5158 sb_data_e2.common.p_func.vf_valid = false;
5159 sb_data_p = (u32 *)&sb_data_e2;
5160 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5162 memset(&sb_data_e1x, 0,
5163 sizeof(struct hc_status_block_data_e1x));
5164 sb_data_e1x.common.state = SB_DISABLED;
5165 sb_data_e1x.common.p_func.vf_valid = false;
5166 sb_data_p = (u32 *)&sb_data_e1x;
5167 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5169 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5171 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5172 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5173 CSTORM_STATUS_BLOCK_SIZE);
5174 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5175 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5176 CSTORM_SYNC_BLOCK_SIZE);
5179 /* helper: writes SP SB data to FW */
5180 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5181 struct hc_sp_status_block_data *sp_sb_data)
5183 int func = BP_FUNC(bp);
5185 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5186 REG_WR(bp, BAR_CSTRORM_INTMEM +
5187 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5189 *((u32 *)sp_sb_data + i));
5192 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5194 int func = BP_FUNC(bp);
5195 struct hc_sp_status_block_data sp_sb_data;
5196 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5198 sp_sb_data.state = SB_DISABLED;
5199 sp_sb_data.p_func.vf_valid = false;
5201 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5203 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5204 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5205 CSTORM_SP_STATUS_BLOCK_SIZE);
5206 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5207 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5208 CSTORM_SP_SYNC_BLOCK_SIZE);
5213 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5214 int igu_sb_id, int igu_seg_id)
5216 hc_sm->igu_sb_id = igu_sb_id;
5217 hc_sm->igu_seg_id = igu_seg_id;
5218 hc_sm->timer_value = 0xFF;
5219 hc_sm->time_to_expire = 0xFFFFFFFF;
5223 /* assigns state machine ids to the SB index data */
5224 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5226 /* zero out state machine indices */
5228 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5231 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5232 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5233 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5234 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5238 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5239 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5242 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5243 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5244 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5245 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5246 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5247 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5248 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5249 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
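/* After this mapping the RX CQ consumer index is driven by the RX state
 * machine (SM_RX_ID), while the OOO TX and all per-CoS TX CQ consumer
 * indices are driven by the TX state machine (SM_TX_ID).
 */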
5252 static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5253 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5257 struct hc_status_block_data_e2 sb_data_e2;
5258 struct hc_status_block_data_e1x sb_data_e1x;
5259 struct hc_status_block_sm *hc_sm_p;
5263 if (CHIP_INT_MODE_IS_BC(bp))
5264 igu_seg_id = HC_SEG_ACCESS_NORM;
5266 igu_seg_id = IGU_SEG_ACCESS_NORM;
5268 bnx2x_zero_fp_sb(bp, fw_sb_id);
5270 if (!CHIP_IS_E1x(bp)) {
5271 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5272 sb_data_e2.common.state = SB_ENABLED;
5273 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5274 sb_data_e2.common.p_func.vf_id = vfid;
5275 sb_data_e2.common.p_func.vf_valid = vf_valid;
5276 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5277 sb_data_e2.common.same_igu_sb_1b = true;
5278 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5279 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5280 hc_sm_p = sb_data_e2.common.state_machine;
5281 sb_data_p = (u32 *)&sb_data_e2;
5282 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5283 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5285 memset(&sb_data_e1x, 0,
5286 sizeof(struct hc_status_block_data_e1x));
5287 sb_data_e1x.common.state = SB_ENABLED;
5288 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5289 sb_data_e1x.common.p_func.vf_id = 0xff;
5290 sb_data_e1x.common.p_func.vf_valid = false;
5291 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5292 sb_data_e1x.common.same_igu_sb_1b = true;
5293 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5294 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5295 hc_sm_p = sb_data_e1x.common.state_machine;
5296 sb_data_p = (u32 *)&sb_data_e1x;
5297 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5298 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5301 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5302 igu_sb_id, igu_seg_id);
5303 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5304 igu_sb_id, igu_seg_id);
5306 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5308 /* write indices to HW */
5309 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5312 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5313 u16 tx_usec, u16 rx_usec)
5315 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5317 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5318 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5320 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5321 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5323 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5324 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5328 static void bnx2x_init_def_sb(struct bnx2x *bp)
5330 struct host_sp_status_block *def_sb = bp->def_status_blk;
5331 dma_addr_t mapping = bp->def_status_blk_mapping;
5332 int igu_sp_sb_index;
5334 int port = BP_PORT(bp);
5335 int func = BP_FUNC(bp);
5336 int reg_offset, reg_offset_en5;
5339 struct hc_sp_status_block_data sp_sb_data;
5340 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5342 if (CHIP_INT_MODE_IS_BC(bp)) {
5343 igu_sp_sb_index = DEF_SB_IGU_ID;
5344 igu_seg_id = HC_SEG_ACCESS_DEF;
5346 igu_sp_sb_index = bp->igu_dsb_id;
5347 igu_seg_id = IGU_SEG_ACCESS_DEF;
5351 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5352 atten_status_block);
5353 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5357 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5358 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5359 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5360 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5361 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5363 /* take care of sig[0]..sig[4] */
5364 for (sindex = 0; sindex < 4; sindex++)
5365 bp->attn_group[index].sig[sindex] =
5366 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5368 if (!CHIP_IS_E1x(bp))
5370 * enable5 is separate from the rest of the registers,
5371 * and therefore the address skip is 4
5372 * and not 16 between the different groups
5374 bp->attn_group[index].sig[4] = REG_RD(bp,
5375 reg_offset_en5 + 0x4*index);
5377 bp->attn_group[index].sig[4] = 0;
5380 if (bp->common.int_block == INT_BLOCK_HC) {
5381 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5382 HC_REG_ATTN_MSG0_ADDR_L);
5384 REG_WR(bp, reg_offset, U64_LO(section));
5385 REG_WR(bp, reg_offset + 4, U64_HI(section));
5386 } else if (!CHIP_IS_E1x(bp)) {
5387 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5388 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5391 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5394 bnx2x_zero_sp_sb(bp);
5396 sp_sb_data.state = SB_ENABLED;
5397 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5398 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5399 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5400 sp_sb_data.igu_seg_id = igu_seg_id;
5401 sp_sb_data.p_func.pf_id = func;
5402 sp_sb_data.p_func.vnic_id = BP_VN(bp);
5403 sp_sb_data.p_func.vf_id = 0xff;
5405 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5407 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5410 void bnx2x_update_coalesce(struct bnx2x *bp)
5414 for_each_eth_queue(bp, i)
5415 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5416 bp->tx_ticks, bp->rx_ticks);
5419 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5421 spin_lock_init(&bp->spq_lock);
5422 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5424 bp->spq_prod_idx = 0;
5425 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5426 bp->spq_prod_bd = bp->spq;
5427 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5430 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5433 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5434 union event_ring_elem *elem =
5435 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5437 elem->next_page.addr.hi =
5438 cpu_to_le32(U64_HI(bp->eq_mapping +
5439 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5440 elem->next_page.addr.lo =
5441 cpu_to_le32(U64_LO(bp->eq_mapping +
5442 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5445 bp->eq_prod = NUM_EQ_DESC;
5446 bp->eq_cons_sb = BNX2X_EQ_INDEX;
5447 /* we want a warning message before it gets rough... */
5448 atomic_set(&bp->eq_spq_left,
5449 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
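/* Note on the credit computed above: eq_spq_left starts at one less
 * than the smaller of the SPQ room (MAX_SP_DESC_CNT - MAX_SPQ_PENDING)
 * and the EQ size (NUM_EQ_DESC), so slow-path commands exhaust their
 * credit, and can warn, one element before either ring could overflow.
 */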
5453 /* called with netif_addr_lock_bh() */
5454 void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5455 unsigned long rx_mode_flags,
5456 unsigned long rx_accept_flags,
5457 unsigned long tx_accept_flags,
5458 unsigned long ramrod_flags)
5460 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5463 memset(&ramrod_param, 0, sizeof(ramrod_param));
5465 /* Prepare ramrod parameters */
5466 ramrod_param.cid = 0;
5467 ramrod_param.cl_id = cl_id;
5468 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5469 ramrod_param.func_id = BP_FUNC(bp);
5471 ramrod_param.pstate = &bp->sp_state;
5472 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5474 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5475 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5477 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5479 ramrod_param.ramrod_flags = ramrod_flags;
5480 ramrod_param.rx_mode_flags = rx_mode_flags;
5482 ramrod_param.rx_accept_flags = rx_accept_flags;
5483 ramrod_param.tx_accept_flags = tx_accept_flags;
5485 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5487 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5492 /* called with netif_addr_lock_bh() */
5493 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5495 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5496 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5500 /* Configure rx_mode of FCoE Queue */
5501 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5503 switch (bp->rx_mode) {
5504 case BNX2X_RX_MODE_NONE:
5506 * 'drop all' supersedes any accept flags that may have been
5507 * passed to the function.
5510 case BNX2X_RX_MODE_NORMAL:
5511 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5512 __set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
5513 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5515 /* internal switching mode */
5516 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5517 __set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
5518 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5521 case BNX2X_RX_MODE_ALLMULTI:
5522 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5523 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5524 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5526 /* internal switching mode */
5527 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5528 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5529 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5532 case BNX2X_RX_MODE_PROMISC:
5533 /* According to the definition of SI mode, an iface in promisc mode
5534 * should receive matched and unmatched (in resolution of port)
5537 __set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
5538 __set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
5539 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
5540 __set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
5542 /* internal switching mode */
5543 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
5544 __set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
5547 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
5549 __set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
5553 BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
5557 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5558 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
5559 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
5562 __set_bit(RAMROD_RX, &ramrod_flags);
5563 __set_bit(RAMROD_TX, &ramrod_flags);
5565 bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
5566 tx_accept_flags, ramrod_flags);
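/* Summary of the accept-flag combinations chosen above (rx / tx):
 *   NONE     - no accept bits; 'drop all' overrides everything
 *   NORMAL   - unicast, multicast, broadcast on both rx and tx
 *   ALLMULTI - unicast, all-multicast, broadcast on both rx and tx
 *   PROMISC  - rx additionally accepts unmatched frames; tx accepts
 *              all-multicast, broadcast and (mode-dependent) unicast
 * Any mode other than NONE also accepts any VLAN in both directions.
 */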
5569 static void bnx2x_init_internal_common(struct bnx2x *bp)
5575 * In switch independent mode, the TSTORM needs to accept
5576 * packets that failed classification, since approximate match
5577 * mac addresses aren't written to NIG LLH
5579 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5580 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
5581 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5582 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5583 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
5585 /* Zero this manually as its initialization is
5586 currently missing in the initTool */
5587 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5588 REG_WR(bp, BAR_USTRORM_INTMEM +
5589 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5590 if (!CHIP_IS_E1x(bp)) {
5591 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5592 CHIP_INT_MODE_IS_BC(bp) ?
5593 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5597 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5599 switch (load_code) {
5600 case FW_MSG_CODE_DRV_LOAD_COMMON:
5601 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5602 bnx2x_init_internal_common(bp);
5605 case FW_MSG_CODE_DRV_LOAD_PORT:
5609 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5610 /* internal memory per function is
5611 initialized inside bnx2x_pf_init */
5615 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5620 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
5622 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
5625 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5627 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
5630 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5632 if (CHIP_IS_E1x(fp->bp))
5633 return BP_L_ID(fp->bp) + fp->index;
5634 else /* We want Client ID to be the same as IGU SB ID for 57712 */
5635 return bnx2x_fp_igu_sb_id(fp);
5638 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5640 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
5642 unsigned long q_type = 0;
5643 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
5644 fp->rx_queue = fp_idx;
5646 fp->cl_id = bnx2x_fp_cl_id(fp);
5647 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5648 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
5649 /* qZone id equals the FW (per path) client id */
5650 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
5653 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
5655 /* Set up SB indices */
5656 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5658 /* Configure Queue State object */
5659 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5660 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
5662 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5665 for_each_cos_in_tx_queue(fp, cos) {
5666 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5667 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5668 FP_COS_TO_TXQ(fp, cos, bp),
5669 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5670 cids[cos] = fp->txdata_ptr[cos]->cid;
5673 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5674 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
5675 bnx2x_sp_mapping(bp, q_rdata), q_type);
5678 * Configure classification DBs: Always enable Tx switching
5680 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5682 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5683 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5685 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5686 fp->fw_sb_id, fp->igu_sb_id);
5688 bnx2x_update_fpsb_idx(fp);
5691 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5695 for (i = 1; i <= NUM_TX_RINGS; i++) {
5696 struct eth_tx_next_bd *tx_next_bd =
5697 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5699 tx_next_bd->addr_hi =
5700 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5701 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5702 tx_next_bd->addr_lo =
5703 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5704 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5707 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5708 txdata->tx_db.data.zero_fill1 = 0;
5709 txdata->tx_db.data.prod = 0;
5711 txdata->tx_pkt_prod = 0;
5712 txdata->tx_pkt_cons = 0;
5713 txdata->tx_bd_prod = 0;
5714 txdata->tx_bd_cons = 0;
5718 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5722 for_each_tx_queue_cnic(bp, i)
5723 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
5725 static void bnx2x_init_tx_rings(struct bnx2x *bp)
5730 for_each_eth_queue(bp, i)
5731 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5732 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
5735 void bnx2x_nic_init_cnic(struct bnx2x *bp)
5738 bnx2x_init_fcoe_fp(bp);
5740 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
5741 BNX2X_VF_ID_INVALID, false,
5742 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
5744 /* ensure status block indices were read */
5746 bnx2x_init_rx_rings_cnic(bp);
5747 bnx2x_init_tx_rings_cnic(bp);
5754 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5758 for_each_eth_queue(bp, i)
5759 bnx2x_init_eth_fp(bp, i);
5760 /* Initialize MOD_ABS interrupts */
5761 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
5762 bp->common.shmem_base, bp->common.shmem2_base,
5764 /* ensure status block indices were read */
5767 bnx2x_init_def_sb(bp);
5768 bnx2x_update_dsb_idx(bp);
5769 bnx2x_init_rx_rings(bp);
5770 bnx2x_init_tx_rings(bp);
5771 bnx2x_init_sp_ring(bp);
5772 bnx2x_init_eq_ring(bp);
5773 bnx2x_init_internal(bp, load_code);
5775 bnx2x_stats_init(bp);
5777 /* flush all before enabling interrupts */
5781 bnx2x_int_enable(bp);
5783 /* Check for SPIO5 */
5784 bnx2x_attn_int_deasserted0(bp,
5785 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5786 AEU_INPUTS_ATTN_BITS_SPIO5);
5789 /* end of nic init */
5792 * gzip service functions
5795 static int bnx2x_gunzip_init(struct bnx2x *bp)
5797 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5798 &bp->gunzip_mapping, GFP_KERNEL);
5799 if (bp->gunzip_buf == NULL)
5802 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5803 if (bp->strm == NULL)
5806 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
5807 if (bp->strm->workspace == NULL)
5817 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5818 bp->gunzip_mapping);
5819 bp->gunzip_buf = NULL;
5822 BNX2X_ERR("Cannot allocate firmware buffer for decompression\n");
5826 static void bnx2x_gunzip_end(struct bnx2x *bp)
5829 vfree(bp->strm->workspace);
5834 if (bp->gunzip_buf) {
5835 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5836 bp->gunzip_mapping);
5837 bp->gunzip_buf = NULL;
5841 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5845 /* check gzip header */
5846 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5847 BNX2X_ERR("Bad gzip header\n");
5855 if (zbuf[3] & FNAME)
5856 while ((zbuf[n++] != 0) && (n < len));
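/* Reminder of the gzip framing handled here: a member starts with magic
 * bytes 0x1f 0x8b, a compression-method byte (Z_DEFLATED == 8) and a
 * flags byte; when the FNAME flag is set a NUL-terminated file name
 * follows the fixed header, which the skip loop above consumes.
 * Decompression then runs as raw deflate - the -MAX_WBITS passed to
 * zlib_inflateInit2() below suppresses zlib's own header processing.
 */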
5858 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5859 bp->strm->avail_in = len - n;
5860 bp->strm->next_out = bp->gunzip_buf;
5861 bp->strm->avail_out = FW_BUF_SIZE;
5863 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5867 rc = zlib_inflate(bp->strm, Z_FINISH);
5868 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5869 netdev_err(bp->dev, "Firmware decompression error: %s\n",
5872 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5873 if (bp->gunzip_outlen & 0x3)
5875 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
5877 bp->gunzip_outlen >>= 2;
5879 zlib_inflateEnd(bp->strm);
5881 if (rc == Z_STREAM_END)
5887 /* nic load/unload */
5890 * General service functions
5893 /* send a NIG loopback debug packet */
5894 static void bnx2x_lb_pckt(struct bnx2x *bp)
5898 /* Ethernet source and destination addresses */
5899 wb_write[0] = 0x55555555;
5900 wb_write[1] = 0x55555555;
5901 wb_write[2] = 0x20; /* SOP */
5902 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5904 /* NON-IP protocol */
5905 wb_write[0] = 0x09000000;
5906 wb_write[1] = 0x55555555;
5907 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5908 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5911 /* some of the internal memories
5912 * are not directly readable from the driver;
5913 * to test them we send debug packets
5915 static int bnx2x_int_mem_test(struct bnx2x *bp)
5921 if (CHIP_REV_IS_FPGA(bp))
5923 else if (CHIP_REV_IS_EMUL(bp))
5928 /* Disable inputs of parser neighbor blocks */
5929 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5930 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5931 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5932 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5934 /* Write 0 to parser credits for CFC search request */
5935 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5937 /* send Ethernet packet */
5940 /* TODO: do I reset NIG statistics? */
5941 /* Wait until NIG register shows 1 packet of size 0x10 */
5942 count = 1000 * factor;
5945 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5946 val = *bnx2x_sp(bp, wb_data[0]);
5954 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5958 /* Wait until PRS register shows 1 packet */
5959 count = 1000 * factor;
5961 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5969 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5973 /* Reset and init BRB, PRS */
5974 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5976 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5978 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
5979 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
5981 DP(NETIF_MSG_HW, "part2\n");
5983 /* Disable inputs of parser neighbor blocks */
5984 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5985 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5986 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5987 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5989 /* Write 0 to parser credits for CFC search request */
5990 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5992 /* send 10 Ethernet packets */
5993 for (i = 0; i < 10; i++)
5996 /* Wait until NIG register shows 10 + 1
5997 packets of size 11*0x10 = 0xb0 */
5998 count = 1000 * factor;
6001 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6002 val = *bnx2x_sp(bp, wb_data[0]);
6010 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6014 /* Wait until PRS register shows 2 packets */
6015 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6017 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6019 /* Write 1 to parser credits for CFC search request */
6020 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6022 /* Wait until PRS register shows 3 packets */
6023 msleep(10 * factor);
6024 /* Wait until NIG register shows 1 packet of size 0x10 */
6025 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6027 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6029 /* clear NIG EOP FIFO */
6030 for (i = 0; i < 11; i++)
6031 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6032 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6034 BNX2X_ERR("clear of NIG failed\n");
6038 /* Reset and init BRB, PRS, NIG */
6039 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6041 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6043 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6044 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6045 if (!CNIC_SUPPORT(bp))
6047 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6049 /* Enable inputs of parser neighbor blocks */
6050 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6051 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6052 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6053 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6055 DP(NETIF_MSG_HW, "done\n");
6060 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6062 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6063 if (!CHIP_IS_E1x(bp))
6064 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6066 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6067 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6068 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6070 * mask read length error interrupts in brb for parser
6071 * (parsing unit and 'checksum and crc' unit);
6072 * these errors are legal (PU reads fixed length and CAC can cause
6073 * read length errors on truncated packets)
6075 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6076 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6077 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6078 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6079 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6080 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6081 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6082 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6083 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6084 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6085 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6086 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6087 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6088 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6089 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6090 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6091 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6092 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6093 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6095 if (CHIP_REV_IS_FPGA(bp))
6096 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6097 else if (!CHIP_IS_E1x(bp))
6098 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
6099 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
6100 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
6101 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
6102 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
6103 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
6105 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6106 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6107 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6108 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6109 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6111 if (!CHIP_IS_E1x(bp))
6112 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6113 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6115 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6116 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6117 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6118 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
6121 static void bnx2x_reset_common(struct bnx2x *bp)
6126 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6129 if (CHIP_IS_E3(bp)) {
6130 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6131 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6134 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6137 static void bnx2x_setup_dmae(struct bnx2x *bp)
6140 spin_lock_init(&bp->dmae_lock);
6143 static void bnx2x_init_pxp(struct bnx2x *bp)
6146 int r_order, w_order;
6148 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6149 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6150 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6152 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6154 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6158 bnx2x_init_pxp_arb(bp, r_order, w_order);
6161 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6171 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6172 SHARED_HW_CFG_FAN_FAILURE_MASK;
6174 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6178 * The fan failure mechanism is usually related to the PHY type since
6179 * the power consumption of the board is affected by the PHY. Currently,
6180 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6182 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6183 for (port = PORT_0; port < PORT_MAX; port++) {
6185 bnx2x_fan_failure_det_req(
6187 bp->common.shmem_base,
6188 bp->common.shmem2_base,
6192 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6194 if (is_required == 0)
6197 /* Fan failure is indicated by SPIO 5 */
6198 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6200 /* set to active low mode */
6201 val = REG_RD(bp, MISC_REG_SPIO_INT);
6202 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6203 REG_WR(bp, MISC_REG_SPIO_INT, val);
6205 /* enable interrupt to signal the IGU */
6206 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6207 val |= MISC_SPIO_SPIO5;
6208 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6211 static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
6217 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
6220 switch (BP_ABS_FUNC(bp)) {
6222 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
6225 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
6228 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
6231 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
6234 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
6237 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
6240 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
6243 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
6249 REG_WR(bp, offset, pretend_func_num);
6251 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
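/* Usage note: after bnx2x_pretend_func(bp, n), GRC accesses issued by
 * this PCI function are presented to the chip as if they came from
 * absolute function n; callers are expected to restore the view with
 * bnx2x_pretend_func(bp, BP_ABS_FUNC(bp)) when done, as the timers
 * workaround in bnx2x_init_hw_common() below does.
 */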
6254 void bnx2x_pf_disable(struct bnx2x *bp)
6256 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6257 val &= ~IGU_PF_CONF_FUNC_EN;
6259 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6260 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6261 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6264 static void bnx2x__common_init_phy(struct bnx2x *bp)
6266 u32 shmem_base[2], shmem2_base[2];
6267 /* Avoid common init in case MFW supports LFA */
6268 if (SHMEM2_RD(bp, size) >
6269 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6271 shmem_base[0] = bp->common.shmem_base;
6272 shmem2_base[0] = bp->common.shmem2_base;
6273 if (!CHIP_IS_E1x(bp)) {
6275 SHMEM2_RD(bp, other_shmem_base_addr);
6277 SHMEM2_RD(bp, other_shmem2_base_addr);
6279 bnx2x_acquire_phy_lock(bp);
6280 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6281 bp->common.chip_id);
6282 bnx2x_release_phy_lock(bp);
6286 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6288 * @bp: driver handle
6290 static int bnx2x_init_hw_common(struct bnx2x *bp)
6294 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
6297 * take the UNDI lock to protect undi_unload flow from accessing
6298 * registers while we're resetting the chip
6300 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6302 bnx2x_reset_common(bp);
6303 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6306 if (CHIP_IS_E3(bp)) {
6307 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6308 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6310 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6312 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6314 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
6316 if (!CHIP_IS_E1x(bp)) {
6320 * 4-port mode or 2-port mode we need to turn off master-enable
6321 * for everyone, after that, turn it back on for self.
6322 * so, we disregard multi-function or not, and always disable
6323 * for all functions on the given path, this means 0,2,4,6 for
6324 * path 0 and 1,3,5,7 for path 1
6326 for (abs_func_id = BP_PATH(bp);
6327 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6328 if (abs_func_id == BP_ABS_FUNC(bp)) {
6330 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6335 bnx2x_pretend_func(bp, abs_func_id);
6336 /* clear pf enable */
6337 bnx2x_pf_disable(bp);
6338 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6342 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
6343 if (CHIP_IS_E1(bp)) {
6344 /* enable HW interrupt from PXP on USDM overflow
6345 bit 16 on INT_MASK_0 */
6346 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6349 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6353 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6354 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6355 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6356 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6357 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6358 /* make sure this value is 0 */
6359 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6361 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6362 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6363 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6364 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6365 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6368 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6370 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6371 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6373 /* let the HW do its magic ... */
6375 /* finish PXP init */
6376 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6378 BNX2X_ERR("PXP2 CFG failed\n");
6381 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6383 BNX2X_ERR("PXP2 RD_INIT failed\n");
6387 /* Timers bug workaround E2 only. We need to set the entire ILT to
6388 * have entries with value "0" and valid bit on.
6389 * This needs to be done by the first PF that is loaded in a path
6390 * (i.e. common phase)
6392 if (!CHIP_IS_E1x(bp)) {
6393 /* In E2 there is a bug in the timers block that can cause function 6 / 7
6394 * (i.e. vnic3) to start even if it is marked as "scan-off".
6395 * This occurs when a different function (func2,3) is being marked
6396 * as "scan-off". Real-life example: a driver is loaded and unloaded
6397 * while func6,7 are down. This will cause the timer to access
6398 * the ilt, translate to a logical address and send a request to read/write.
6399 * Since the ilt for the function that is down is not valid, this will cause
6400 * a translation error which is unrecoverable.
6401 * The Workaround is intended to make sure that when this happens nothing fatal
6402 * will occur. The workaround:
6403 * 1. First PF driver which loads on a path will:
6404 * a. After taking the chip out of reset, by using pretend,
6405 * it will write "0" to the following registers of
6407 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6408 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
6409 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
6410 * And for itself it will write '1' to
6411 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
6412 * dmae-operations (writing to pram for example.)
6413 * note: can be done for only function 6,7 but cleaner this
6415 * b. Write zero+valid to the entire ILT.
6416 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
6417 * VNIC3 (of that port). The range allocated will be the
6418 * entire ILT. This is needed to prevent ILT range error.
6419 * 2. Any PF driver load flow:
6420 * a. ILT update with the physical addresses of the allocated
6422 * b. Wait 20msec. - note that this timeout is needed to make
6423 * sure there are no requests in one of the PXP internal
6424 * queues with "old" ILT addresses.
6425 * c. PF enable in the PGLC.
6426 * d. Clear the was_error of the PF in the PGLC. (could have
6427 * occurred while the driver was down)
6428 * e. PF enable in the CFC (WEAK + STRONG)
6429 * f. Timers scan enable
6430 * 3. PF driver unload flow:
6431 * a. Clear the Timers scan_en.
6432 * b. Polling for scan_on=0 for that PF.
6433 * c. Clear the PF enable bit in the PXP.
6434 * d. Clear the PF enable in the CFC (WEAK + STRONG)
6435 * e. Write zero+valid to all ILT entries (The valid bit must
6437 * f. If this is VNIC 3 of a port then also init
6438 * first_timers_ilt_entry to zero and last_timers_ilt_entry
6439 * to the last entry in the ILT.
6442 * Currently the PF error in the PGLC is non-recoverable.
6443 * In the future there will be a recovery routine for this error.
6444 * Currently attention is masked.
6445 * Having an MCP lock on the load/unload process does not guarantee that
6446 * there is no Timer disable during Func6/7 enable. This is because the
6447 * Timers scan is currently being cleared by the MCP on FLR.
6448 * Step 2.d can be done only for PF6/7 and the driver can also check if
6449 * there is error before clearing it. But the flow above is simpler and
6451 * All ILT entries are written by zero+valid and not just PF6/7
6452 * ILT entries since in the future the ILT entries allocation for
6453 * PF-s might be dynamic.
6455 struct ilt_client_info ilt_cli;
6456 struct bnx2x_ilt ilt;
6457 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6458 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6460 /* initialize dummy TM client */
6462 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6463 ilt_cli.client_num = ILT_CLIENT_TM;
6465 /* Step 1: set zeroes to all ilt page entries with valid bit on
6466 * Step 2: set the timers first/last ilt entry to point
6467 * to the entire range to prevent ILT range error for 3rd/4th
6468 * vnic (this code assumes existence of the vnic)
6470 * both steps performed by call to bnx2x_ilt_client_init_op()
6471 * with dummy TM client
6473 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
6474 * and its sibling are split registers
6476 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6477 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6478 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
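/* Editor's sketch (illustration, not part of the original flow): the
 * "pretend" idiom used above, reduced to its essential order. The helpers
 * are the driver's own primitives; only the sequence is the point:
 */
#if 0	/* illustration only */
	/* 1) redirect our GRC accesses to another function's view */
	bnx2x_pretend_func(bp, BP_PATH(bp) + 6);
	/* 2) perform the access as if we were that function */
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	/* 3) always restore our own view before continuing */
	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
#endif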
6480 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6481 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6482 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6486 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6487 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6489 if (!CHIP_IS_E1x(bp)) {
6490 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6491 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6492 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6494 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6496 /* let the HW do its magic ... */
6497 do {
6498 msleep(200);
6499 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6500 } while (factor-- && (val != 1));
6502 if (val != 1) {
6503 BNX2X_ERR("ATC_INIT failed\n");
6504 return -EBUSY;
6505 }
6506 }
6508 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6510 /* clean the DMAE memory */
6511 bp->dmae_ready = 1;
6512 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6514 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6516 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6518 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6520 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6522 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6523 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6524 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6525 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6527 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6530 /* QM queues pointers table */
6531 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6533 /* soft reset pulse */
6534 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6535 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6537 if (CNIC_SUPPORT(bp))
6538 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6540 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6541 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
6542 if (!CHIP_REV_IS_SLOW(bp))
6543 /* enable hw interrupt from doorbell Q */
6544 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6546 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6548 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6549 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6551 if (!CHIP_IS_E1(bp))
6552 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6554 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6555 if (IS_MF_AFEX(bp)) {
6556 /* configure that VNTag and VLAN headers must be
6557 * received in afex mode
6558 */
6559 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6560 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6561 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6562 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6563 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6564 } else {
6565 /* Bit-map indicating which L2 hdrs may appear
6566 * after the basic Ethernet header
6567 */
6568 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6569 bp->path_has_ovlan ? 7 : 6);
6570 }
6571 }
6573 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6574 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6575 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6576 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
6578 if (!CHIP_IS_E1x(bp)) {
6579 /* reset VFC memories */
6580 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6581 VFC_MEMORIES_RST_REG_CAM_RST |
6582 VFC_MEMORIES_RST_REG_RAM_RST);
6583 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6584 VFC_MEMORIES_RST_REG_CAM_RST |
6585 VFC_MEMORIES_RST_REG_RAM_RST);
6590 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6591 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6592 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6593 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
6596 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6598 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6601 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6602 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6603 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6605 if (!CHIP_IS_E1x(bp)) {
6606 if (IS_MF_AFEX(bp)) {
6607 /* configure that VNTag and VLAN headers must be
6608 * sent in afex mode
6609 */
6610 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6611 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6612 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6613 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6614 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6615 } else {
6616 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6617 bp->path_has_ovlan ? 7 : 6);
6618 }
6619 }
6621 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6623 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6625 if (CNIC_SUPPORT(bp)) {
6626 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6627 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6628 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6629 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6630 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6631 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6632 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6633 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6634 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6635 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6637 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6639 if (sizeof(union cdu_context) != 1024)
6640 /* we currently assume that a context is 1024 bytes */
6641 dev_alert(&bp->pdev->dev,
6642 "please adjust the size of cdu_context(%ld)\n",
6643 (long)sizeof(union cdu_context));
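/* Editor's note: a compile-time variant of the check above. The driver
 * keeps the runtime dev_alert() so a header mismatch only warns at probe
 * time, but the same invariant could be enforced at build time with the
 * kernel's BUILD_BUG_ON():
 */
#if 0	/* illustration only */
	BUILD_BUG_ON(sizeof(union cdu_context) != 1024);
#endif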
6645 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
6646 val = (4 << 24) + (0 << 12) + 1024;
6647 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6649 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
6650 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6651 /* enable context validation interrupt from CFC */
6652 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6654 /* set the thresholds to prevent CFC/CDU race */
6655 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6657 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
6659 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
6660 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
6662 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
6663 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
6665 /* Reset PCIE errors for debug */
6666 REG_WR(bp, 0x2814, 0xffffffff);
6667 REG_WR(bp, 0x3820, 0xffffffff);
6669 if (!CHIP_IS_E1x(bp)) {
6670 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
6671 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
6672 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
6673 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
6674 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
6675 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
6676 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
6677 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
6678 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
6679 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
6680 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
6683 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
6684 if (!CHIP_IS_E1(bp)) {
6685 /* in E3 this is done in the per-port section */
6686 if (!CHIP_IS_E3(bp))
6687 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
6689 if (CHIP_IS_E1H(bp))
6690 /* not applicable for E2 (and above ...) */
6691 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
6693 if (CHIP_REV_IS_SLOW(bp))
6694 msleep(200);
6696 /* finish CFC init */
6697 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6698 if (val != 1) {
6699 BNX2X_ERR("CFC LL_INIT failed\n");
6700 return -EBUSY;
6701 }
6702 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6703 if (val != 1) {
6704 BNX2X_ERR("CFC AC_INIT failed\n");
6705 return -EBUSY;
6706 }
6707 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6708 if (val != 1) {
6709 BNX2X_ERR("CFC CAM_INIT failed\n");
6710 return -EBUSY;
6711 }
6712 REG_WR(bp, CFC_REG_DEBUG0, 0);
6714 if (CHIP_IS_E1(bp)) {
6715 /* read NIG statistic
6716 to see if this is our first up since powerup */
6717 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6718 val = *bnx2x_sp(bp, wb_data[0]);
6720 /* do internal memory self test */
6721 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6722 BNX2X_ERR("internal mem self test failed\n");
6723 return -EBUSY;
6724 }
6725 }
6727 bnx2x_setup_fan_failure_detection(bp);
6729 /* clear PXP2 attentions */
6730 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6732 bnx2x_enable_blocks_attention(bp);
6733 bnx2x_enable_blocks_parity(bp);
6735 if (!BP_NOMCP(bp)) {
6736 if (CHIP_IS_E1x(bp))
6737 bnx2x__common_init_phy(bp);
6738 } else
6739 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6741 return 0;
6742 }
6745 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
6747 * @bp: driver handle
6749 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
6750 {
6751 int rc = bnx2x_init_hw_common(bp);
6753 if (rc)
6754 return rc;
6756 /* In E2 2-PORT mode, same ext phy is used for the two paths */
6757 if (!BP_NOMCP(bp))
6758 bnx2x__common_init_phy(bp);
6760 return 0;
6761 }
6763 static int bnx2x_init_hw_port(struct bnx2x *bp)
6765 int port = BP_PORT(bp);
6766 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
6767 u32 low, high;
6768 u32 val;
6771 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
6773 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6775 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
6776 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
6777 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
6779 /* Timers bug workaround: disables the pf_master bit in pglue at
6780 * common phase, we need to enable it here before any dmae access are
6781 * attempted. Therefore we manually added the enable-master to the
6782 * port phase (it also happens in the function phase)
6784 if (!CHIP_IS_E1x(bp))
6785 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
6787 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
6788 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
6789 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
6790 bnx2x_init_block(bp, BLOCK_QM, init_phase);
6792 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
6793 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
6794 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
6795 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
6797 /* QM cid (connection) count */
6798 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
6800 if (CNIC_SUPPORT(bp)) {
6801 bnx2x_init_block(bp, BLOCK_TM, init_phase);
6802 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6803 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6806 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
6808 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
6810 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
6811 if (IS_MF(bp))
6813 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6814 else if (bp->dev->mtu > 4096) {
6815 if (bp->flags & ONE_PORT_FLAG)
6816 low = 160;
6817 else {
6818 val = bp->dev->mtu;
6819 /* (24*1024 + val*4)/256 */
6820 low = 96 + (val/64) +
6821 ((val % 64) ? 1 : 0);
6822 }
6823 } else
6824 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6825 high = low + 56; /* 14*1024/256 */
6826 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6827 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6828 }
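/* Editor's worked example of the threshold formula above, assuming a
 * two-port (!ONE_PORT_FLAG) device with MTU 9000:
 * low  = 96 + 9000/64 + 1 = 237 blocks, i.e. (24*1024 + 9000*4)/256
 *        rounded up;
 * high = 237 + 56 = 293 blocks (56 blocks == 14*1024/256).
 */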
6830 if (CHIP_MODE_IS_4_PORT(bp))
6831 REG_WR(bp, (BP_PORT(bp) ?
6832 BRB1_REG_MAC_GUARANTIED_1 :
6833 BRB1_REG_MAC_GUARANTIED_0), 40);
6836 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6837 if (CHIP_IS_E3B0(bp)) {
6838 if (IS_MF_AFEX(bp)) {
6839 /* configure headers for AFEX mode */
6840 REG_WR(bp, BP_PORT(bp) ?
6841 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6842 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
6843 REG_WR(bp, BP_PORT(bp) ?
6844 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
6845 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
6846 REG_WR(bp, BP_PORT(bp) ?
6847 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
6848 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
6850 /* Ovlan exists only if we are in multi-function +
6851 * switch-dependent mode; in switch-independent mode
6852 * there are no ovlan headers
6853 */
6854 REG_WR(bp, BP_PORT(bp) ?
6855 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6856 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6857 (bp->path_has_ovlan ? 7 : 6));
6861 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6862 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
6863 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
6864 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
6866 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
6867 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
6868 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
6869 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
6871 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
6872 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
6874 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
6876 if (CHIP_IS_E1x(bp)) {
6877 /* configure PBF to work without PAUSE mtu 9000 */
6878 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6880 /* update threshold */
6881 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6882 /* update init credit */
6883 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6886 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6888 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6891 if (CNIC_SUPPORT(bp))
6892 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
6894 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
6895 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
6897 if (CHIP_IS_E1(bp)) {
6898 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6899 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6901 bnx2x_init_block(bp, BLOCK_HC, init_phase);
6903 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
6905 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
6906 /* init aeu_mask_attn_func_0/1:
6907 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6908 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6909 * bits 4-7 are used for "per vn group attention" */
6910 val = IS_MF(bp) ? 0xF7 : 0x7;
6911 /* Enable DCBX attention for all but E1 */
6912 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
6913 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
6915 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
6917 if (!CHIP_IS_E1x(bp)) {
6918 /* Bit-map indicating which L2 hdrs may appear after the
6919 * basic Ethernet header
6920 */
6921 if (IS_MF_AFEX(bp))
6922 REG_WR(bp, BP_PORT(bp) ?
6923 NIG_REG_P1_HDRS_AFTER_BASIC :
6924 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
6925 else
6926 REG_WR(bp, BP_PORT(bp) ?
6927 NIG_REG_P1_HDRS_AFTER_BASIC :
6928 NIG_REG_P0_HDRS_AFTER_BASIC,
6929 IS_MF_SD(bp) ? 7 : 6);
6931 if (CHIP_IS_E3(bp))
6932 REG_WR(bp, BP_PORT(bp) ?
6933 NIG_REG_LLH1_MF_MODE :
6934 NIG_REG_LLH_MF_MODE, IS_MF(bp));
6935 }
6936 if (!CHIP_IS_E3(bp))
6937 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6939 if (!CHIP_IS_E1(bp)) {
6940 /* 0x2 disable mf_ov, 0x1 enable */
6941 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6942 (IS_MF_SD(bp) ? 0x1 : 0x2));
6944 if (!CHIP_IS_E1x(bp)) {
6945 val = 0;
6946 switch (bp->mf_mode) {
6947 case MULTI_FUNCTION_SD:
6948 val = 1;
6949 break;
6950 case MULTI_FUNCTION_SI:
6951 case MULTI_FUNCTION_AFEX:
6952 val = 2;
6953 break;
6954 }
6956 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
6957 NIG_REG_LLH0_CLS_TYPE), val);
6958 }
6960 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6961 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6962 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6967 /* If SPIO5 is set to generate interrupts, enable it for this port */
6968 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6969 if (val & MISC_SPIO_SPIO5) {
6970 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6971 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6972 val = REG_RD(bp, reg_addr);
6973 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6974 REG_WR(bp, reg_addr, val);
6980 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6981 {
6982 u32 wb_write[2];
6983 u32 reg;
6985 if (CHIP_IS_E1(bp))
6986 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6987 else
6988 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6990 wb_write[0] = ONCHIP_ADDR1(addr);
6991 wb_write[1] = ONCHIP_ADDR2(addr);
6992 REG_WR_DMAE(bp, reg, wb_write, 2);
6993 }
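/* Editor's usage note: passing a zero address writes a zero (invalid)
 * mapping, which is exactly how bnx2x_clear_func_ilt() below releases
 * every ILT line a function owns:
 *
 *	bnx2x_ilt_wr(bp, line, 0);
 */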
6995 static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
6996 u8 idu_sb_id, bool is_Pf)
6998 u32 data, ctl, cnt = 100;
6999 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7000 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7001 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7002 u32 sb_bit = 1 << (idu_sb_id%32);
7003 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7004 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7006 /* Not supported in BC mode */
7007 if (CHIP_INT_MODE_IS_BC(bp))
7008 return;
7010 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7011 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7012 IGU_REGULAR_CLEANUP_SET |
7013 IGU_REGULAR_BCLEANUP;
7015 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7016 func_encode << IGU_CTRL_REG_FID_SHIFT |
7017 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7019 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7020 data, igu_addr_data);
7021 REG_WR(bp, igu_addr_data, data);
7024 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7026 REG_WR(bp, igu_addr_ctl, ctl);
7030 /* wait for clean up to finish */
7031 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7035 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7037 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7038 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
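/* Editor's worked example of the encoding above (hypothetical values):
 * for idu_sb_id == 17 on PF 1, sb_bit == (1 << 17) within the first
 * cleanup register (17/32 == 0); func_encode packs the FID with the
 * PF flag, and ctl carries (IGU_CMD_E2_PROD_UPD_BASE + 17) as the
 * address together with the FID and a WR command type.
 */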
7042 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7044 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7047 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7049 u32 i, base = FUNC_ILT_BASE(func);
7050 for (i = base; i < base + ILT_PER_FUNC; i++)
7051 bnx2x_ilt_wr(bp, i, 0);
7055 static void bnx2x_init_searcher(struct bnx2x *bp)
7057 int port = BP_PORT(bp);
7058 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7059 /* T1 hash bits value determines the T1 number of entries */
7060 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7063 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7066 struct bnx2x_func_state_params func_params = {NULL};
7067 struct bnx2x_func_switch_update_params *switch_update_params =
7068 &func_params.params.switch_update;
7070 /* Prepare parameters for function state transitions */
7071 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7072 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7074 func_params.f_obj = &bp->func_obj;
7075 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7077 /* Function parameters */
7078 switch_update_params->suspend = suspend;
7080 rc = bnx2x_func_state_change(bp, &func_params);
7082 return rc;
7083 }
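/* Editor's sketch of the suspend/resume bracket this helper enables; it
 * is how bnx2x_reset_nic_mode() below protects the NIC_MODE register
 * update (error handling elided):
 */
#if 0	/* illustration only */
	int rc;

	rc = bnx2x_func_switch_update(bp, 1);	/* suspend Tx switching */
	if (!rc) {
		REG_WR(bp, PRS_REG_NIC_MODE, 0);
		rc = bnx2x_func_switch_update(bp, 0);	/* resume */
	}
#endif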
7085 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7087 int rc, i, port = BP_PORT(bp);
7088 int vlan_en = 0, mac_en[NUM_MACS];
7091 /* Close input from network */
7092 if (bp->mf_mode == SINGLE_FUNCTION) {
7093 bnx2x_set_rx_filter(&bp->link_params, 0);
7094 } else {
7095 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7096 NIG_REG_LLH0_FUNC_EN);
7097 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7098 NIG_REG_LLH0_FUNC_EN, 0);
7099 for (i = 0; i < NUM_MACS; i++) {
7100 mac_en[i] = REG_RD(bp, port ?
7101 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7102 4 * i) :
7103 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7104 4 * i));
7105 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7106 4 * i) :
7107 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7108 }
7109 }
7111 /* Close BMC to host */
7112 REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
7113 NIG_REG_P0_TX_MNG_HOST_ENABLE, 0);
7115 /* Suspend Tx switching to the PF. Completion of this ramrod
7116 * further guarantees that all the packets of that PF / child
7117 * VFs in BRB were processed by the Parser, so it is safe to
7118 * change the NIC_MODE register.
7120 rc = bnx2x_func_switch_update(bp, 1);
7122 BNX2X_ERR("Can't suspend tx-switching!\n");
7126 /* Change NIC_MODE register */
7127 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7129 /* Open input from network */
7130 if (bp->mf_mode == SINGLE_FUNCTION) {
7131 bnx2x_set_rx_filter(&bp->link_params, 1);
7133 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7134 NIG_REG_LLH0_FUNC_EN, vlan_en);
7135 for (i = 0; i < NUM_MACS; i++) {
7136 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7138 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7143 /* Enable BMC to host */
7144 REG_WR(bp, port ? NIG_REG_P1_TX_MNG_HOST_ENABLE :
7145 NIG_REG_P0_TX_MNG_HOST_ENABLE, 1);
7147 /* Resume Tx switching to the PF */
7148 rc = bnx2x_func_switch_update(bp, 0);
7150 BNX2X_ERR("Can't resume tx-switching!\n");
7154 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7158 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7162 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7164 if (CONFIGURE_NIC_MODE(bp)) {
7165 /* Configure searcher as part of function hw init */
7166 bnx2x_init_searcher(bp);
7168 /* Reset NIC mode */
7169 rc = bnx2x_reset_nic_mode(bp);
7170 if (rc)
7171 BNX2X_ERR("Can't change NIC mode!\n");
7172 return rc;
7173 }
7175 return 0;
7176 }
7178 static int bnx2x_init_hw_func(struct bnx2x *bp)
7180 int port = BP_PORT(bp);
7181 int func = BP_FUNC(bp);
7182 int init_phase = PHASE_PF0 + func;
7183 struct bnx2x_ilt *ilt = BP_ILT(bp);
7186 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7187 int i, main_mem_width, rc;
7189 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7191 /* FLR cleanup: clear any stale state left by a function-level reset */
7192 if (!CHIP_IS_E1x(bp)) {
7193 rc = bnx2x_pf_flr_clnup(bp);
7198 /* set MSI reconfigure capability */
7199 if (bp->common.int_block == INT_BLOCK_HC) {
7200 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7201 val = REG_RD(bp, addr);
7202 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7203 REG_WR(bp, addr, val);
7206 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7207 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7210 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7212 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7213 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7214 ilt->lines[cdu_ilt_start + i].page_mapping =
7215 bp->context[i].cxt_mapping;
7216 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7218 bnx2x_ilt_init_op(bp, INITOP_SET);
7220 if (!CONFIGURE_NIC_MODE(bp)) {
7221 bnx2x_init_searcher(bp);
7222 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7223 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7226 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7227 DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
7231 if (!CHIP_IS_E1x(bp)) {
7232 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7234 /* Turn on a single ISR mode in IGU if driver is going to use
7235 * INT#x or MSI
7236 */
7237 if (!(bp->flags & USING_MSIX_FLAG))
7238 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7239 /*
7240 * Timers workaround bug: function init part.
7241 * Need to wait 20msec after initializing ILT,
7242 * needed to make sure there are no requests in
7243 * one of the PXP internal queues with "old" ILT addresses
7244 */
7245 msleep(20);
7246 /*
7247 * Master enable - Due to WB DMAE writes performed before this
7248 * register is re-initialized as part of the regular function
7249 * init
7250 */
7251 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7252 /* Enable the function in IGU */
7253 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7258 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7260 if (!CHIP_IS_E1x(bp))
7261 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7263 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7264 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7265 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7266 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7267 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7268 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7269 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7270 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7271 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7272 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7273 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7274 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7275 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7277 if (!CHIP_IS_E1x(bp))
7278 REG_WR(bp, QM_REG_PF_EN, 1);
7280 if (!CHIP_IS_E1x(bp)) {
7281 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7282 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7283 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7284 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7286 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7288 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7289 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7290 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7291 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7292 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7293 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7294 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7295 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7296 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7297 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7298 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7299 if (!CHIP_IS_E1x(bp))
7300 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7302 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7304 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7306 if (!CHIP_IS_E1x(bp))
7307 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7309 if (IS_MF(bp)) {
7310 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7311 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
7312 }
7314 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7316 /* HC init per function */
7317 if (bp->common.int_block == INT_BLOCK_HC) {
7318 if (CHIP_IS_E1H(bp)) {
7319 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7321 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7322 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7323 }
7324 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7325 } else {
7327 int num_segs, sb_idx, prod_offset;
7329 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7331 if (!CHIP_IS_E1x(bp)) {
7332 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7333 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7336 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7338 if (!CHIP_IS_E1x(bp)) {
7339 int dsb_idx = 0;
7340 /*
7341 * Producer memory:
7342 * E2 mode: address 0-135 match to the mapping memory;
7343 * 136 - PF0 default prod; 137 - PF1 default prod;
7344 * 138 - PF2 default prod; 139 - PF3 default prod;
7345 * 140 - PF0 attn prod; 141 - PF1 attn prod;
7346 * 142 - PF2 attn prod; 143 - PF3 attn prod;
7349 * E1.5 mode - In backward compatible mode;
7350 * for non default SB; each even line in the memory
7351 * holds the U producer and each odd line hold
7352 * the C producer. The first 128 producers are for
7353 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
7354 * producers are for the DSB for each PF.
7355 * Each PF has five segments: (the order inside each
7356 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
7357 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
7358 * 144-147 attn prods;
7359 */
7360 /* non-default-status-blocks */
7361 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7362 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7363 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7364 prod_offset = (bp->igu_base_sb + sb_idx) *
7367 for (i = 0; i < num_segs; i++) {
7368 addr = IGU_REG_PROD_CONS_MEMORY +
7369 (prod_offset + i) * 4;
7370 REG_WR(bp, addr, 0);
7372 /* send consumer update with value 0 */
7373 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7374 USTORM_ID, 0, IGU_INT_NOP, 1);
7375 bnx2x_igu_clear_sb(bp,
7376 bp->igu_base_sb + sb_idx);
7379 /* default-status-blocks */
7380 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7381 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7383 if (CHIP_MODE_IS_4_PORT(bp))
7384 dsb_idx = BP_FUNC(bp);
7386 dsb_idx = BP_VN(bp);
7388 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7389 IGU_BC_BASE_DSB_PROD + dsb_idx :
7390 IGU_NORM_BASE_DSB_PROD + dsb_idx);
7393 * igu prods come in chunks of E1HVN_MAX (4) -
7394 * it does not matter what the current chip mode is
7395 */
7396 for (i = 0; i < (num_segs * E1HVN_MAX);
7397 i += E1HVN_MAX) {
7398 addr = IGU_REG_PROD_CONS_MEMORY +
7399 (prod_offset + i)*4;
7400 REG_WR(bp, addr, 0);
7402 /* send consumer update with 0 */
7403 if (CHIP_INT_MODE_IS_BC(bp)) {
7404 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7405 USTORM_ID, 0, IGU_INT_NOP, 1);
7406 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7407 CSTORM_ID, 0, IGU_INT_NOP, 1);
7408 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7409 XSTORM_ID, 0, IGU_INT_NOP, 1);
7410 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7411 TSTORM_ID, 0, IGU_INT_NOP, 1);
7412 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7413 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7414 } else {
7415 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7416 USTORM_ID, 0, IGU_INT_NOP, 1);
7417 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7418 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7420 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7422 /* !!! these should become driver const once
7423 rf-tool supports split-68 const */
7424 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7425 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7426 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7427 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7428 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7429 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7433 /* Reset PCIE errors for debug */
7434 REG_WR(bp, 0x2114, 0xffffffff);
7435 REG_WR(bp, 0x2120, 0xffffffff);
7437 if (CHIP_IS_E1x(bp)) {
7438 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
7439 main_mem_base = HC_REG_MAIN_MEMORY +
7440 BP_PORT(bp) * (main_mem_size * 4);
7441 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7442 main_mem_width = 8;
7444 val = REG_RD(bp, main_mem_prty_clr);
7445 if (val)
7446 DP(NETIF_MSG_HW,
7447 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7448 val);
7450 /* Clear "false" parity errors in MSI-X table */
7451 for (i = main_mem_base;
7452 i < main_mem_base + main_mem_size * 4;
7453 i += main_mem_width) {
7454 bnx2x_read_dmae(bp, i, main_mem_width / 4);
7455 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7456 i, main_mem_width / 4);
7458 /* Clear HC parity attention */
7459 REG_RD(bp, main_mem_prty_clr);
7462 #ifdef BNX2X_STOP_ON_ERROR
7463 /* Enable STORMs SP logging */
7464 REG_WR8(bp, BAR_USTRORM_INTMEM +
7465 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7466 REG_WR8(bp, BAR_TSTRORM_INTMEM +
7467 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7468 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7469 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7470 REG_WR8(bp, BAR_XSTRORM_INTMEM +
7471 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7474 bnx2x_phy_probe(&bp->link_params);
7476 return 0;
7477 }
7480 void bnx2x_free_mem_cnic(struct bnx2x *bp)
7482 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7484 if (!CHIP_IS_E1x(bp))
7485 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7486 sizeof(struct host_hc_status_block_e2));
7488 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7489 sizeof(struct host_hc_status_block_e1x));
7491 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7494 void bnx2x_free_mem(struct bnx2x *bp)
7499 bnx2x_free_fp_mem(bp);
7500 /* end of fastpath */
7502 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7503 sizeof(struct host_sp_status_block));
7505 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7506 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7508 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7509 sizeof(struct bnx2x_slowpath));
7511 for (i = 0; i < L2_ILT_LINES(bp); i++)
7512 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7513 bp->context[i].size);
7514 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7516 BNX2X_FREE(bp->ilt->lines);
7518 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7520 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7521 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7524 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
7527 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
7529 /* number of queues for statistics is number of eth queues + FCoE */
7530 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
7532 /* Total number of FW statistics requests =
7533 * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
7534 * num of queues
7535 */
7536 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
7539 /* Request is built from stats_query_header and an array of
7540 * stats_query_cmd_group each of which contains
7541 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
7542 * configured in the stats_query_header.
7544 num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
7545 (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
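/* Editor's worked example (assuming STATS_QUERY_CMD_COUNT is 16): with
 * 8 ethernet queues plus FCoE, fw_stats_num = 2 + 1 + 9 = 12, which fits
 * in a single command group: num_groups = 12/16 + (12 % 16 ? 1 : 0) = 1.
 */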
7547 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
7548 num_groups * sizeof(struct stats_query_cmd_group);
7550 /* Data for statistics requests + stats_counter
7552 * stats_counter holds per-STORM counters that are incremented
7553 * when STORM has finished with the current request.
7555 * Memory for FCoE offloaded statistics is counted anyway,
7556 * even if those statistics will not be sent.
7557 */
7558 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
7559 sizeof(struct per_pf_stats) +
7560 sizeof(struct fcoe_statistics_params) +
7561 sizeof(struct per_queue_stats) * num_queue_stats +
7562 sizeof(struct stats_counter);
7564 BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
7565 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7568 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
7569 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
7571 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
7572 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
7574 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
7575 bp->fw_stats_req_sz;
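/* Editor's sketch of the resulting layout of the single allocation:
 *
 *   fw_stats_mapping      ->  +---------------------------------+
 *                             | stats_query_header +            |
 *                             | num_groups command groups       | req_sz
 *   fw_stats_data_mapping ->  +---------------------------------+
 *                             | per-port, per-PF, FCoE and      |
 *                             | per-queue stats + stats_counter | data_sz
 *                             +---------------------------------+
 */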
7576 return 0;
7578 alloc_mem_err:
7579 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7580 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7581 BNX2X_ERR("Can't allocate memory\n");
7582 return -ENOMEM;
7583 }
7585 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7587 if (!CHIP_IS_E1x(bp))
7588 /* size = the status block + ramrod buffers */
7589 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7590 sizeof(struct host_hc_status_block_e2));
7591 else
7592 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7593 &bp->cnic_sb_mapping,
7595 host_hc_status_block_e1x));
7597 if (CONFIGURE_NIC_MODE(bp))
7598 /* allocate searcher T2 table, as it wasn't allocated before */
7599 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7601 /* write address to which L5 should insert its values */
7602 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7603 &bp->slowpath->drv_info_to_mcp;
7605 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7606 goto alloc_mem_err;
7608 return 0;
7610 alloc_mem_err:
7611 bnx2x_free_mem_cnic(bp);
7612 BNX2X_ERR("Can't allocate memory\n");
7613 return -ENOMEM;
7614 }
7616 int bnx2x_alloc_mem(struct bnx2x *bp)
7618 int i, allocated, context_size;
7620 if (!CONFIGURE_NIC_MODE(bp))
7621 /* allocate searcher T2 table */
7622 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7624 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7625 sizeof(struct host_sp_status_block));
7627 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7628 sizeof(struct bnx2x_slowpath));
7630 /* Allocate memory for FW statistics */
7631 if (bnx2x_alloc_fw_stats_mem(bp))
7632 goto alloc_mem_err;
7634 /* Allocate memory for CDU context:
7635 * This memory is allocated separately and not in the generic ILT
7636 * functions because CDU differs in few aspects:
7637 * 1. There are multiple entities allocating memory for context -
7638 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7639 * its own ILT lines.
7640 * 2. Since CDU page-size is not a single 4KB page (which is the case
7641 * for the other ILT clients), to be efficient we want to support
7642 * allocation of sub-page-size in the last entry.
7643 * 3. Context pointers are used by the driver to pass to FW / update
7644 * the context (for the other ILT clients the pointers are used just to
7645 * free the memory during unload).
7647 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7649 for (i = 0, allocated = 0; allocated < context_size; i++) {
7650 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7651 (context_size - allocated));
7652 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7653 &bp->context[i].cxt_mapping,
7654 bp->context[i].size);
7655 allocated += bp->context[i].size;
7657 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7659 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
7662 /* Slow path ring */
7663 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7666 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
7667 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7671 /* need to be done at the end, since it's self adjusting to amount
7672 * of memory available for RSS queues
7674 if (bnx2x_alloc_fp_mem(bp))
7675 goto alloc_mem_err;
7677 return 0;
7679 alloc_mem_err:
7680 bnx2x_free_mem(bp);
7681 BNX2X_ERR("Can't allocate memory\n");
7682 return -ENOMEM;
7683 }
7685 * Init service functions
7688 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
7689 struct bnx2x_vlan_mac_obj *obj, bool set,
7690 int mac_type, unsigned long *ramrod_flags)
7693 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
7695 memset(&ramrod_param, 0, sizeof(ramrod_param));
7697 /* Fill general parameters */
7698 ramrod_param.vlan_mac_obj = obj;
7699 ramrod_param.ramrod_flags = *ramrod_flags;
7701 /* Fill a user request section if needed */
7702 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
7703 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
7705 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
7707 /* Set the command: ADD or DEL */
7708 if (set)
7709 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
7710 else
7711 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
7712 }
7714 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
7716 if (rc == -EEXIST) {
7717 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
7718 /* do not treat adding the same MAC as an error */
7719 rc = 0;
7720 } else if (rc < 0)
7721 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
7723 return rc;
7724 }
7726 int bnx2x_del_all_macs(struct bnx2x *bp,
7727 struct bnx2x_vlan_mac_obj *mac_obj,
7728 int mac_type, bool wait_for_comp)
7731 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
7733 /* Wait for completion if requested */
7734 if (wait_for_comp)
7735 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7737 /* Set the mac type of addresses we want to clear */
7738 __set_bit(mac_type, &vlan_mac_flags);
7740 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
7742 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
7747 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7749 unsigned long ramrod_flags = 0;
7751 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7752 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7753 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7754 "Ignoring Zero MAC for STORAGE SD mode\n");
7758 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7760 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7761 /* Eth MAC is set on RSS leading client (fp[0]) */
7762 return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj,
7763 set, BNX2X_ETH_MAC, &ramrod_flags);
7766 int bnx2x_setup_leading(struct bnx2x *bp)
7768 return bnx2x_setup_queue(bp, &bp->fp[0], 1);
7772 * bnx2x_set_int_mode - configure interrupt mode
7774 * @bp: driver handle
7776 * In case of MSI-X it will also try to enable MSI-X.
7778 void bnx2x_set_int_mode(struct bnx2x *bp)
7779 {
7780 switch (int_mode) {
7781 case INT_MODE_MSI:
7782 bnx2x_enable_msi(bp);
7783 /* falling through... */
7784 case INT_MODE_INTx:
7785 bp->num_ethernet_queues = 1;
7786 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
7787 BNX2X_DEV_INFO("set number of queues to 1\n");
7788 break;
7789 default:
7790 /* if we can't use MSI-X we only need one fp,
7791 * so try to enable MSI-X with the requested number of fp's
7792 * and fallback to MSI or legacy INTx with one fp
7794 if (bnx2x_enable_msix(bp) ||
7795 bp->flags & USING_SINGLE_MSIX_FLAG) {
7796 /* failed to enable multiple MSI-X */
7797 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7799 1 + bp->num_cnic_queues);
7801 bp->num_queues = 1 + bp->num_cnic_queues;
7803 /* Try to enable MSI */
7804 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
7805 !(bp->flags & DISABLE_MSI_FLAG))
7806 bnx2x_enable_msi(bp);
7807 }
7808 break;
7809 }
7810 }
7812 /* must be called prior to any HW initializations */
7813 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
7815 return L2_ILT_LINES(bp);
7818 void bnx2x_ilt_set_info(struct bnx2x *bp)
7820 struct ilt_client_info *ilt_client;
7821 struct bnx2x_ilt *ilt = BP_ILT(bp);
7824 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
7825 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
7828 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
7829 ilt_client->client_num = ILT_CLIENT_CDU;
7830 ilt_client->page_size = CDU_ILT_PAGE_SZ;
7831 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
7832 ilt_client->start = line;
7833 line += bnx2x_cid_ilt_lines(bp);
7835 if (CNIC_SUPPORT(bp))
7836 line += CNIC_ILT_LINES;
7837 ilt_client->end = line - 1;
7839 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7842 ilt_client->page_size,
7844 ilog2(ilt_client->page_size >> 12));
7847 if (QM_INIT(bp->qm_cid_count)) {
7848 ilt_client = &ilt->clients[ILT_CLIENT_QM];
7849 ilt_client->client_num = ILT_CLIENT_QM;
7850 ilt_client->page_size = QM_ILT_PAGE_SZ;
7851 ilt_client->flags = 0;
7852 ilt_client->start = line;
7854 /* 4 bytes for each cid */
7855 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
7858 ilt_client->end = line - 1;
7861 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7864 ilt_client->page_size,
7866 ilog2(ilt_client->page_size >> 12));
7870 if (CNIC_SUPPORT(bp)) {
7872 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
7873 ilt_client->client_num = ILT_CLIENT_SRC;
7874 ilt_client->page_size = SRC_ILT_PAGE_SZ;
7875 ilt_client->flags = 0;
7876 ilt_client->start = line;
7877 line += SRC_ILT_LINES;
7878 ilt_client->end = line - 1;
7881 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7884 ilt_client->page_size,
7886 ilog2(ilt_client->page_size >> 12));
7889 ilt_client = &ilt->clients[ILT_CLIENT_TM];
7890 ilt_client->client_num = ILT_CLIENT_TM;
7891 ilt_client->page_size = TM_ILT_PAGE_SZ;
7892 ilt_client->flags = 0;
7893 ilt_client->start = line;
7894 line += TM_ILT_LINES;
7895 ilt_client->end = line - 1;
7898 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
7901 ilt_client->page_size,
7903 ilog2(ilt_client->page_size >> 12));
7906 BUG_ON(line > ILT_MAX_LINES);
7910 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
7912 * @bp: driver handle
7913 * @fp: pointer to fastpath
7914 * @init_params: pointer to parameters structure
7916 * parameters configured:
7917 * - HC configuration
7918 * - Queue's CDU context
7920 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7921 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
7925 int cxt_index, cxt_offset;
7927 /* FCoE Queue uses Default SB, thus has no HC capabilities */
7928 if (!IS_FCOE_FP(fp)) {
7929 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
7930 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
7932 /* If HC is supported, enable host coalescing in the transition
7933 * to INIT state.
7934 */
7935 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
7936 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
7939 init_params->rx.hc_rate = bp->rx_ticks ?
7940 (1000000 / bp->rx_ticks) : 0;
7941 init_params->tx.hc_rate = bp->tx_ticks ?
7942 (1000000 / bp->tx_ticks) : 0;
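/* Editor's example: a coalescing interval of rx_ticks == 25 (usec) gives
 * hc_rate = 1000000/25 = 40000 status-block updates per second, while a
 * ticks value of 0 leaves the timer-based coalescing rate at 0.
 */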
7945 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
7946 fp->fw_sb_id;
7948 /*
7949 * CQ index among the SB indices: FCoE clients use the default
7950 * SB, therefore it's different.
7951 */
7952 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
7953 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
7956 /* set maximum number of COSs supported by this queue */
7957 init_params->max_cos = fp->max_cos;
7959 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
7960 fp->index, init_params->max_cos);
7962 /* set the context pointers queue object */
7963 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
7964 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
7965 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
7966 ILT_PAGE_CIDS);
7967 init_params->cxts[cos] =
7968 &bp->context[cxt_index].vcxt[cxt_offset].eth;
7969 }
7970 }
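/* Editor's worked example (assuming ILT_PAGE_CIDS == 64): cid 70 maps to
 * cxt_index = 70/64 = 1 and cxt_offset = 70 - 1*64 = 6, i.e. the 7th
 * context within the second CDU page allocated in bnx2x_alloc_mem() above.
 */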
7972 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
7973 struct bnx2x_queue_state_params *q_params,
7974 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
7975 int tx_index, bool leading)
7977 memset(tx_only_params, 0, sizeof(*tx_only_params));
7979 /* Set the command */
7980 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
7982 /* Set tx-only QUEUE flags: don't zero statistics */
7983 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
7985 /* choose the index of the cid to send the slow path on */
7986 tx_only_params->cid_index = tx_index;
7988 /* Set general TX_ONLY_SETUP parameters */
7989 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
7991 /* Set Tx TX_ONLY_SETUP parameters */
7992 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
7995 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
7996 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
7997 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
7998 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8000 /* send the ramrod */
8001 return bnx2x_queue_state_change(bp, q_params);
8006 * bnx2x_setup_queue - setup queue
8008 * @bp: driver handle
8009 * @fp: pointer to fastpath
8010 * @leading: is leading
8012 * This function performs 2 steps in a Queue state machine
8013 * actually: 1) RESET->INIT 2) INIT->SETUP
8016 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8017 bool leading)
8018 {
8019 struct bnx2x_queue_state_params q_params = {NULL};
8020 struct bnx2x_queue_setup_params *setup_params =
8021 &q_params.params.setup;
8022 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8023 &q_params.params.tx_only;
8027 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8029 /* reset IGU state skip FCoE L2 queue */
8030 if (!IS_FCOE_FP(fp))
8031 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8032 IGU_INT_ENABLE, 0);
8034 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8035 /* We want to wait for completion in this context */
8036 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8038 /* Prepare the INIT parameters */
8039 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8041 /* Set the command */
8042 q_params.cmd = BNX2X_Q_CMD_INIT;
8044 /* Change the state to INIT */
8045 rc = bnx2x_queue_state_change(bp, &q_params);
8047 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8051 DP(NETIF_MSG_IFUP, "init complete\n");
8054 /* Now move the Queue to the SETUP state... */
8055 memset(setup_params, 0, sizeof(*setup_params));
8057 /* Set QUEUE flags */
8058 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8060 /* Set general SETUP parameters */
8061 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8062 FIRST_TX_COS_INDEX);
8064 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8065 &setup_params->rxq_params);
8067 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8068 FIRST_TX_COS_INDEX);
8070 /* Set the command */
8071 q_params.cmd = BNX2X_Q_CMD_SETUP;
8073 if (IS_FCOE_FP(fp))
8074 bp->fcoe_init = true;
8076 /* Change the state to SETUP */
8077 rc = bnx2x_queue_state_change(bp, &q_params);
8079 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8083 /* loop through the relevant tx-only indices */
8084 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8085 tx_index < fp->max_cos;
8086 tx_index++) {
8088 /* prepare and send tx-only ramrod */
8089 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8090 tx_only_params, tx_index, leading);
8092 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8093 fp->index, tx_index);
8094 return rc;
8095 }
8096 }
8098 return rc;
8099 }
8101 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8103 struct bnx2x_fastpath *fp = &bp->fp[index];
8104 struct bnx2x_fp_txdata *txdata;
8105 struct bnx2x_queue_state_params q_params = {NULL};
8108 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8110 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8111 /* We want to wait for completion in this context */
8112 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8115 /* close tx-only connections */
8116 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8117 tx_index < fp->max_cos;
8118 tx_index++) {
8120 /* ascertain this is a normal queue*/
8121 txdata = fp->txdata_ptr[tx_index];
8123 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8126 /* send halt terminate on tx-only connection */
8127 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8128 memset(&q_params.params.terminate, 0,
8129 sizeof(q_params.params.terminate));
8130 q_params.params.terminate.cid_index = tx_index;
8132 rc = bnx2x_queue_state_change(bp, &q_params);
8133 if (rc)
8134 return rc;
8136 /* delete the cfc entry on the tx-only connection */
8137 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8138 memset(&q_params.params.cfc_del, 0,
8139 sizeof(q_params.params.cfc_del));
8140 q_params.params.cfc_del.cid_index = tx_index;
8141 rc = bnx2x_queue_state_change(bp, &q_params);
8142 if (rc)
8143 return rc;
8144 }
8145 /* Stop the primary connection: */
8146 /* ...halt the connection */
8147 q_params.cmd = BNX2X_Q_CMD_HALT;
8148 rc = bnx2x_queue_state_change(bp, &q_params);
8149 if (rc)
8150 return rc;
8152 /* ...terminate the connection */
8153 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8154 memset(&q_params.params.terminate, 0,
8155 sizeof(q_params.params.terminate));
8156 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8157 rc = bnx2x_queue_state_change(bp, &q_params);
8158 if (rc)
8159 return rc;
8160 /* ...delete cfc entry */
8161 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8162 memset(&q_params.params.cfc_del, 0,
8163 sizeof(q_params.params.cfc_del));
8164 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8165 return bnx2x_queue_state_change(bp, &q_params);
8169 static void bnx2x_reset_func(struct bnx2x *bp)
8171 int port = BP_PORT(bp);
8172 int func = BP_FUNC(bp);
8175 /* Disable the function in the FW */
8176 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8177 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8178 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8179 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8182 for_each_eth_queue(bp, i) {
8183 struct bnx2x_fastpath *fp = &bp->fp[i];
8184 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8185 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8186 SB_DISABLED);
8187 }
8189 if (CNIC_LOADED(bp))
8191 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8192 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8193 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8196 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8197 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8198 SB_DISABLED);
8200 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8201 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8205 if (bp->common.int_block == INT_BLOCK_HC) {
8206 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8207 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8209 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8210 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8213 if (CNIC_LOADED(bp)) {
8214 /* Disable Timer scan */
8215 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8217 * Wait for at least 10ms and up to 2 seconds for the timers
8218 * scan to complete
8219 */
8220 for (i = 0; i < 200; i++) {
8221 msleep(10);
8222 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8223 break;
8224 }
8225 }
8227 bnx2x_clear_func_ilt(bp, func);
8229 /* Timers workaround bug for E2: if this is vnic-3,
8230 * we need to set the entire ilt range for the timers.
8231 */
8232 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8233 struct ilt_client_info ilt_cli;
8234 /* use dummy TM client */
8235 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8237 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8238 ilt_cli.client_num = ILT_CLIENT_TM;
8240 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8243 /* this assumes that reset_port() called before reset_func()*/
8244 if (!CHIP_IS_E1x(bp))
8245 bnx2x_pf_disable(bp);
8250 static void bnx2x_reset_port(struct bnx2x *bp)
8252 int port = BP_PORT(bp);
8255 /* Reset physical Link */
8256 bnx2x__link_reset(bp);
8258 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8260 /* Do not rcv packets to BRB */
8261 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8262 /* Do not direct rcv packets that are not for MCP to the BRB */
8263 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8264 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8267 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8270 /* Check for BRB port occupancy */
8271 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8272 if (val)
8273 DP(NETIF_MSG_IFDOWN,
8274 "BRB1 is not empty %d blocks are occupied\n", val);
8276 /* TODO: Close Doorbell port? */
8279 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8281 struct bnx2x_func_state_params func_params = {NULL};
8283 /* Prepare parameters for function state transitions */
8284 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8286 func_params.f_obj = &bp->func_obj;
8287 func_params.cmd = BNX2X_F_CMD_HW_RESET;
8289 func_params.params.hw_init.load_phase = load_code;
8291 return bnx2x_func_state_change(bp, &func_params);
8294 static int bnx2x_func_stop(struct bnx2x *bp)
8296 struct bnx2x_func_state_params func_params = {NULL};
8299 /* Prepare parameters for function state transitions */
8300 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8301 func_params.f_obj = &bp->func_obj;
8302 func_params.cmd = BNX2X_F_CMD_STOP;
8305 * Try to stop the function the 'good way'. If it fails (in case
8306 * of a parity error during bnx2x_chip_cleanup()) and we are
8307 * not in a debug mode, perform a state transaction in order to
8308 * enable further HW_RESET transaction.
8310 rc = bnx2x_func_state_change(bp, &func_params);
8311 #ifdef BNX2X_STOP_ON_ERROR
8312 if (rc)
8313 return rc;
8314 #else
8315 if (rc) {
8316 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8317 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8318 return bnx2x_func_state_change(bp, &func_params);
8319 }
8320 #endif
8322 return 0;
8323 }
8325 * bnx2x_send_unload_req - request unload mode from the MCP.
8327 * @bp: driver handle
8328 * @unload_mode: requested function's unload mode
8330 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
8332 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8335 int port = BP_PORT(bp);
8337 /* Select the UNLOAD request mode */
8338 if (unload_mode == UNLOAD_NORMAL)
8339 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8341 else if (bp->flags & NO_WOL_FLAG)
8342 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8344 else if (bp->wol) {
8345 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8346 u8 *mac_addr = bp->dev->dev_addr;
8350 /* The mac address is written to entries 1-4 to
8351 * preserve entry 0 which is used by the PMF
8353 u8 entry = (BP_VN(bp) + 1)*8;
8355 val = (mac_addr[0] << 8) | mac_addr[1];
8356 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8358 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8359 (mac_addr[4] << 8) | mac_addr[5];
8360 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
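/* Editor's example: for a (hypothetical) MAC 00:10:18:ab:cd:ef the two
 * words written above are 0x00000010 (bytes 0-1) and 0x18abcdef
 * (bytes 2-5), matching the high/low split the EMAC match registers
 * expect.
 */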
8362 /* Enable the PME and clear the status */
8363 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
8364 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8365 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
8367 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8369 } else
8370 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8372 /* Send the request to the MCP */
8373 if (!BP_NOMCP(bp))
8374 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8375 else {
8376 int path = BP_PATH(bp);
8378 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
8379 path, load_count[path][0], load_count[path][1],
8380 load_count[path][2]);
8381 load_count[path][0]--;
8382 load_count[path][1 + port]--;
8383 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
8384 path, load_count[path][0], load_count[path][1],
8385 load_count[path][2]);
8386 if (load_count[path][0] == 0)
8387 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8388 else if (load_count[path][1 + port] == 0)
8389 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8390 else
8391 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8392 }
8394 return reset_code;
8395 }
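/* Editor's worked example: with three functions loaded on the path, one
 * unload leaves load_count[path][0] == 2, so the MCP-less fallback picks
 * FW_MSG_CODE_DRV_UNLOAD_FUNCTION; only the last unload on the path
 * yields UNLOAD_COMMON, and the last on a port yields UNLOAD_PORT.
 */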
8398 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8400 * @bp: driver handle
8401 * @keep_link: true iff link should be kept up
8403 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8405 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8407 /* Report UNLOAD_DONE to MCP */
8408 if (!BP_NOMCP(bp))
8409 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8410 }
8412 static int bnx2x_func_wait_started(struct bnx2x *bp)
8414 {
8415 int tout = 50;
8416 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8418 if (!bp->port.pmf)
8419 return 0;
8420 /*
8421 * (assumption: No Attention from MCP at this stage)
8422 * PMF is probably in the middle of a TX disable/enable transaction
8423 * 1. Sync ISR for default SB
8424 * 2. Sync SP queue - this guarantees us that attention handling started
8425 * 3. Wait until the TX disable/enable transaction completes
8427 * 1+2 guarantee that if a DCBx attention was scheduled it already changed
8428 * the pending bit of the transaction from STARTED-->TX_STOPPED; if we
8429 * already received completion for the transaction the state is TX_STOPPED.
8430 * State will return to STARTED after completion of TX_STOPPED-->STARTED
8431 * transition.
8432 */
8434 /* make sure default SB ISR is done */
8436 synchronize_irq(bp->msix_table[0].vector);
8438 synchronize_irq(bp->pdev->irq);
8440 flush_workqueue(bnx2x_wq);
8442 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8443 BNX2X_F_STATE_STARTED && tout--)
8444 msleep(20);
8446 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8447 BNX2X_F_STATE_STARTED) {
8448 #ifdef BNX2X_STOP_ON_ERROR
8449 BNX2X_ERR("Wrong function state\n");
8453 * Failed to complete the transaction in a "good way"
8454 * Force both transactions with CLR bit
8456 struct bnx2x_func_state_params func_params = {NULL};
8458 DP(NETIF_MSG_IFDOWN,
8459 "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
8461 func_params.f_obj = &bp->func_obj;
8462 __set_bit(RAMROD_DRV_CLR_ONLY,
8463 &func_params.ramrod_flags);
8465 /* STARTED-->TX_STOPPED */
8466 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8467 bnx2x_func_state_change(bp, &func_params);
8469 /* TX_STOPPED-->STARTED */
8470 func_params.cmd = BNX2X_F_CMD_TX_START;
8471 return bnx2x_func_state_change(bp, &func_params);
8472 #endif
8473 }
8475 return 0;
8476 }
8478 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8479 {
8480 int port = BP_PORT(bp);
8481 int i, rc = 0;
8482 u8 cos;
8483 struct bnx2x_mcast_ramrod_params rparam = {NULL};
8484 u32 reset_code;
8486 /* Wait until tx fastpath tasks complete */
8487 for_each_tx_queue(bp, i) {
8488 struct bnx2x_fastpath *fp = &bp->fp[i];
8490 for_each_cos_in_tx_queue(fp, cos)
8491 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8492 #ifdef BNX2X_STOP_ON_ERROR
8493 if (rc)
8494 return;
8495 #endif
8496 }
8498 /* Give HW time to discard old tx messages */
8499 usleep_range(1000, 1000);
8501 /* Clean all ETH MACs */
8502 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8503 false);
8504 if (rc < 0)
8505 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8507 /* Clean up UC list */
8508 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8509 true);
8510 if (rc < 0)
8511 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8512 rc);
8515 if (!CHIP_IS_E1(bp))
8516 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8518 /* Set "drop all" (stop Rx).
8519 * We need to take a netif_addr_lock() here in order to prevent
8520 * a race between the completion code and this code.
8522 netif_addr_lock_bh(bp->dev);
8523 /* Schedule the rx_mode command */
8524 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8525 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8527 bnx2x_set_storm_rx_mode(bp);
8529 /* Cleanup multicast configuration */
8530 rparam.mcast_obj = &bp->mcast_obj;
8531 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8533 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8535 netif_addr_unlock_bh(bp->dev);
8540 * Send the UNLOAD_REQUEST to the MCP. The reply tells us whether
8541 * this function should perform a FUNC, PORT or COMMON HW reset.
8544 reset_code = bnx2x_send_unload_req(bp, unload_mode);
8547 * (assumption: No Attention from MCP at this stage)
8548 * PMF probably in the middle of TXdisable/enable transaction
8550 rc = bnx2x_func_wait_started(bp);
8552 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8553 #ifdef BNX2X_STOP_ON_ERROR
8558 /* Close multi and leading connections
8559 * Completions for ramrods are collected in a synchronous way
8561 for_each_eth_queue(bp, i)
8562 if (bnx2x_stop_queue(bp, i))
8563 #ifdef BNX2X_STOP_ON_ERROR
8569 if (CNIC_LOADED(bp)) {
8570 for_each_cnic_queue(bp, i)
8571 if (bnx2x_stop_queue(bp, i))
8572 #ifdef BNX2X_STOP_ON_ERROR
8579 /* If SP settings didn't get completed so far - something
8580 * has gone very wrong.
8582 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8583 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
8585 #ifndef BNX2X_STOP_ON_ERROR
8588 rc = bnx2x_func_stop(bp);
8590 BNX2X_ERR("Function stop failed!\n");
8591 #ifdef BNX2X_STOP_ON_ERROR
8596 /* Disable HW interrupts, NAPI */
8597 bnx2x_netif_stop(bp, 1);
8598 /* Delete all NAPI objects */
8599 bnx2x_del_all_napi(bp);
8600 if (CNIC_LOADED(bp))
8601 bnx2x_del_all_napi_cnic(bp);
8606 /* Reset the chip */
8607 rc = bnx2x_reset_hw(bp, reset_code);
8609 BNX2X_ERR("HW_RESET failed\n");
8612 /* Report UNLOAD_DONE to MCP */
8613 bnx2x_send_unload_done(bp, keep_link);
8616 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8620 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
8622 if (CHIP_IS_E1(bp)) {
8623 int port = BP_PORT(bp);
8624 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8625 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8627 val = REG_RD(bp, addr);
8629 REG_WR(bp, addr, val);
8631 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8632 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8633 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8634 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8638 /* Close gates #2, #3 and #4: */
8639 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8643 /* Gates #2 and #4a are closed/opened for "not E1" only */
8644 if (!CHIP_IS_E1(bp)) {
8646 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
8648 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
8652 if (CHIP_IS_E1x(bp)) {
8653 /* Prevent interrupts from HC on both ports */
8654 val = REG_RD(bp, HC_REG_CONFIG_1);
8655 REG_WR(bp, HC_REG_CONFIG_1,
8656 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
8657 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
8659 val = REG_RD(bp, HC_REG_CONFIG_0);
8660 REG_WR(bp, HC_REG_CONFIG_0,
8661 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
8662 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
8664 /* Prevent incoming interrupts in IGU */
8665 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8667 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
8669 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
8670 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
8673 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
8674 close ? "closing" : "opening");
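/* Added note: the numbered "gates" appear to map to concrete blocks -
 * gate #2 is the PXP doorbell discard (HST_DISCARD_DOORBELLS), gate #3 is
 * interrupt delivery (HC config on E1x, the IGU block enable otherwise)
 * and gate #4 is the PXP internal-writes discard. Closing them isolates
 * the chip from host activity before a "process kill" reset.
 */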
8678 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8680 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8682 /* Do some magic... */
8683 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8684 *magic_val = val & SHARED_MF_CLP_MAGIC;
8685 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8689 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
8691 * @bp: driver handle
8692 * @magic_val: old value of the `magic' bit.
8694 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8696 /* Restore the `magic' bit value... */
8697 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8698 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8699 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
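/* Added note: per the comments above, the `magic' bit marks the MF
 * configuration in the CLP mailbox as valid so that it survives the MCP
 * reset - prep() saves the old bit value and forces it on, done() writes
 * the saved value back once the MCP is up again.
 */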
8703 * bnx2x_reset_mcp_prep - prepare for MCP reset.
8705 * @bp: driver handle
8706 * @magic_val: old value of 'magic' bit.
8708 * Takes care of CLP configurations.
8710 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8713 u32 validity_offset;
8715 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
8717 /* Set `magic' bit in order to save MF config */
8718 if (!CHIP_IS_E1(bp))
8719 bnx2x_clp_reset_prep(bp, magic_val);
8721 /* Get shmem offset */
8722 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8723 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8725 /* Clear validity map flags */
8727 REG_WR(bp, shmem + validity_offset, 0);
8730 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8731 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
8734 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
8736 * @bp: driver handle
8738 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
8740 /* Special handling for emulation and FPGA:
8741 * wait 10 times longer */
8742 if (CHIP_REV_IS_SLOW(bp))
8743 msleep(MCP_ONE_TIMEOUT*10);
8745 msleep(MCP_ONE_TIMEOUT);
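/* Added note on the polling budget used below: bnx2x_init_shmem() retries
 * up to MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 iterations of
 * ~100 ms each, i.e. about 5 s on real silicon and about 50 s on
 * emulation/FPGA, where bnx2x_mcp_wait_one() sleeps 10 times longer.
 */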
8749 * initializes bp->common.shmem_base and waits for validity signature to appear
8751 static int bnx2x_init_shmem(struct bnx2x *bp)
8757 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8758 if (bp->common.shmem_base) {
8759 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8760 if (val & SHR_MEM_VALIDITY_MB)
8764 bnx2x_mcp_wait_one(bp);
8766 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
8768 BNX2X_ERR("BAD MCP validity signature\n");
8773 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8775 int rc = bnx2x_init_shmem(bp);
8777 /* Restore the `magic' bit value */
8778 if (!CHIP_IS_E1(bp))
8779 bnx2x_clp_reset_done(bp, magic_val);
8784 static void bnx2x_pxp_prep(struct bnx2x *bp)
8786 if (!CHIP_IS_E1(bp)) {
8787 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8788 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8794 * Reset the whole chip except for:
8796 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
8799 * - MISC (including AEU)
8803 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
8805 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8806 u32 global_bits2, stay_reset2;
8809 * Bits that have to be set in reset_mask2 if we want to reset 'global'
8810 * (per chip) blocks.
8813 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
8814 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
8816 /* Don't reset the following blocks */
8818 MISC_REGISTERS_RESET_REG_1_RST_HC |
8819 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8820 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8823 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
8824 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8825 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8826 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8827 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8828 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8829 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8830 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
8831 MISC_REGISTERS_RESET_REG_2_RST_ATC |
8832 MISC_REGISTERS_RESET_REG_2_PGLC;
8835 * Keep the following blocks in reset:
8836 * - all xxMACs are handled by the bnx2x_link code.
8839 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
8840 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
8841 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
8842 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
8843 MISC_REGISTERS_RESET_REG_2_UMAC0 |
8844 MISC_REGISTERS_RESET_REG_2_UMAC1 |
8845 MISC_REGISTERS_RESET_REG_2_XMAC |
8846 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
8848 /* Full reset masks according to the chip */
8849 reset_mask1 = 0xffffffff;
8852 reset_mask2 = 0xffff;
8853 else if (CHIP_IS_E1H(bp))
8854 reset_mask2 = 0x1ffff;
8855 else if (CHIP_IS_E2(bp))
8856 reset_mask2 = 0xfffff;
8857 else /* CHIP_IS_E3 */
8858 reset_mask2 = 0x3ffffff;
8860 /* Don't reset global blocks unless we need to */
8862 reset_mask2 &= ~global_bits2;
8865 * In case of attention in the QM, we need to reset PXP
8866 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
8867 * because otherwise QM reset would release 'close the gates' shortly
8868 * before resetting the PXP, then the PSWRQ would send a write
8869 * request to PGLUE. Then when PXP is reset, PGLUE would try to
8870 * read the payload data from PSWWR, but PSWWR would not
8871 * respond. The write queue in PGLUE would get stuck, DMAE commands
8872 * would not return. Therefore it's important to reset the second
8873 * reset register (containing the
8874 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
8875 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM bit).
8878 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8879 reset_mask2 & (~not_reset_mask2));
8881 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8882 reset_mask1 & (~not_reset_mask1));
8887 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
8888 reset_mask2 & (~stay_reset2));
8893 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
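/* Added note (a hedged reading of the register pairs above): the MISC
 * reset registers come in _SET/_CLEAR pairs - writing a mask to _CLEAR
 * asserts reset for the corresponding blocks and writing to _SET releases
 * it. Hence the order: assert reset via REG_2 (minus not_reset_mask2),
 * then via REG_1, then release everything except stay_reset2, which
 * leaves the MACs in reset for the link code to handle.
 */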
8898 * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
8901 * @bp: driver handle
8903 * It should get cleared in no more than 1s. Returns 0 if
8904 * the pending writes bit gets cleared.
8906 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
8912 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
8917 usleep_range(1000, 1000);
8918 } while (cnt-- > 0);
8921 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
8929 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
8933 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8936 /* Empty the Tetris buffer, wait for 1s */
8938 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8939 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8940 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8941 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8942 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8943 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8944 ((port_is_idle_0 & 0x1) == 0x1) &&
8945 ((port_is_idle_1 & 0x1) == 0x1) &&
8946 (pgl_exp_rom2 == 0xffffffff))
8948 usleep_range(1000, 1000);
8949 } while (cnt-- > 0);
8952 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
8953 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8954 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8955 pgl_exp_rom2);
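/* Added note: the constants tested above (sr_cnt == 0x7e, blk_cnt == 0xa0,
 * the port-idle LSBs set and pgl_exp_rom2 == 0xffffffff) appear to be the
 * "fully drained and idle" signature of the PXP2 read path; anything else
 * after ~1 s of polling means read requests are still outstanding.
 */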
8961 /* Close gates #2, #3 and #4 */
8962 bnx2x_set_234_gates(bp, true);
8964 /* Poll for IGU VQs for 57712 and newer chips */
8965 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
8969 /* TBD: Indicate that "process kill" is in progress to MCP */
8971 /* Clear "unprepared" bit */
8972 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8975 /* Make sure all is written to the chip before the reset */
8978 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8979 * PSWHST, GRC and PSWRD Tetris buffer.
8981 usleep_range(1000, 1000);
8983 /* Prepare for chip reset: */
8986 bnx2x_reset_mcp_prep(bp, &val);
8992 /* reset the chip */
8993 bnx2x_process_kill_chip_reset(bp, global);
8996 /* Recover after reset: */
8998 if (global && bnx2x_reset_mcp_comp(bp, val))
9001 /* TBD: Add resetting the NO_MCP mode DB here */
9006 /* Open the gates #2, #3 and #4 */
9007 bnx2x_set_234_gates(bp, false);
9009 /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
9010 * reset state, re-enable attentions. */
9015 static int bnx2x_leader_reset(struct bnx2x *bp)
9018 bool global = bnx2x_reset_is_global(bp);
9021 /* if not going to reset the MCP - load a "fake" driver to reset
9022 * the HW while this driver is the owner of the HW */
9024 if (!global && !BP_NOMCP(bp)) {
9025 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9026 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9028 BNX2X_ERR("MCP response failure, aborting\n");
9030 goto exit_leader_reset;
9032 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9033 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9034 BNX2X_ERR("MCP unexpected resp, aborting\n");
9036 goto exit_leader_reset2;
9038 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9040 BNX2X_ERR("MCP response failure, aborting\n");
9042 goto exit_leader_reset2;
9046 /* Try to recover after the failure */
9047 if (bnx2x_process_kill(bp, global)) {
9048 BNX2X_ERR("Something bad has happened on engine %d! Aii!\n",
9051 goto exit_leader_reset2;
9055 * Clear RESET_IN_PROGRES and RESET_GLOBAL bits and update the driver
9058 bnx2x_set_reset_done(bp);
9060 bnx2x_clear_reset_global(bp);
9063 /* unload "fake driver" if it was loaded */
9064 if (!global && !BP_NOMCP(bp)) {
9065 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9066 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9070 bnx2x_release_leader_lock(bp);
9075 static void bnx2x_recovery_failed(struct bnx2x *bp)
9077 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9079 /* Disconnect this device */
9080 netif_device_detach(bp->dev);
9083 * Block ifup for all functions on this engine until "process kill" completes
9086 bnx2x_set_reset_in_progress(bp);
9088 /* Shut down the power */
9089 bnx2x_set_power_state(bp, PCI_D3hot);
9091 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9097 * Assumption: runs under rtnl lock. This together with the fact
9098 * that it's called only from bnx2x_sp_rtnl() ensure that it
9099 * will never be called when netif_running(bp->dev) is false.
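/* Added summary of the recovery state machine driven below:
 *
 *   BNX2X_RECOVERY_INIT - parity detected; unload the NIC and, if the
 *       LEADER_LOCK was grabbed, mark reset-in-progress (and a global
 *       reset if a global attention was raised).
 *   BNX2X_RECOVERY_WAIT - leader: wait for all other functions to go
 *       down, then run bnx2x_leader_reset(); non-leader: wait for the
 *       reset to complete or inherit leadership.
 *   BNX2X_RECOVERY_NIC_LOADING / _DONE / _FAILED - reload the NIC and
 *       account the event as a recoverable or unrecoverable error.
 */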
9101 static void bnx2x_parity_recover(struct bnx2x *bp)
9103 bool global = false;
9104 u32 error_recovered, error_unrecovered;
9107 DP(NETIF_MSG_HW, "Handling parity\n");
9109 switch (bp->recovery_state) {
9110 case BNX2X_RECOVERY_INIT:
9111 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9112 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9113 WARN_ON(!is_parity);
9115 /* Try to get a LEADER_LOCK HW lock */
9116 if (bnx2x_trylock_leader_lock(bp)) {
9117 bnx2x_set_reset_in_progress(bp);
9119 * Check if there is a global attention and, if
9120 * there was one, set the global reset bit.
9125 bnx2x_set_reset_global(bp);
9130 /* Stop the driver */
9131 /* If interface has been removed - break */
9132 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9135 bp->recovery_state = BNX2X_RECOVERY_WAIT;
9137 /* Ensure "is_leader", the MCP command sequence and the
9138 * "recovery_state" update are seen by other CPUs. */
9144 case BNX2X_RECOVERY_WAIT:
9145 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9146 if (bp->is_leader) {
9147 int other_engine = BP_PATH(bp) ? 0 : 1;
9148 bool other_load_status =
9149 bnx2x_get_load_status(bp, other_engine);
9150 bool load_status =
9151 bnx2x_get_load_status(bp, BP_PATH(bp));
9152 global = bnx2x_reset_is_global(bp);
9155 * In case of a parity in a global block, let
9156 * the first leader that performs a
9157 * leader_reset() reset the global blocks in
9158 * order to clear global attentions. Otherwise
9159 * the gates will remain closed for that engine.
9163 (global && other_load_status)) {
9164 /* Wait until all other functions get down. */
9167 schedule_delayed_work(&bp->sp_rtnl_task,
9171 /* If all other functions got down -
9172 * try to bring the chip back to
9173 * normal. In any case it's an exit
9174 * point for a leader.
9176 if (bnx2x_leader_reset(bp)) {
9177 bnx2x_recovery_failed(bp);
9181 /* If we are here, it means that the
9182 * leader has succeeded and doesn't
9183 * want to be a leader any more. Try
9184 * to continue as a non-leader. */
9188 } else { /* non-leader */
9189 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9190 /* Try to get a LEADER_LOCK HW lock as
9191 * long as a former leader may have
9192 * been unloaded by the user or
9193 * released leadership for another reason. */
9196 if (bnx2x_trylock_leader_lock(bp)) {
9197 /* I'm a leader now! Restart the switch case. */
9204 schedule_delayed_work(&bp->sp_rtnl_task,
9210 * If there was a global attention, wait
9211 * for it to be cleared.
9213 if (bnx2x_reset_is_global(bp)) {
9214 schedule_delayed_work(
9221 error_recovered = bp->eth_stats.recoverable_error;
9223 error_unrecovered = bp->eth_stats.unrecoverable_error;
9224 bp->recovery_state =
9225 BNX2X_RECOVERY_NIC_LOADING;
9226 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9227 error_unrecovered++;
9229 "Recovery failed. Power cycle needed\n");
9230 /* Disconnect this device */
9231 netif_device_detach(bp->dev);
9232 /* Shut down the power */
9233 bnx2x_set_power_state(
9234 bp, PCI_D3hot);
9237 bp->recovery_state =
9238 BNX2X_RECOVERY_DONE;
9242 bp->eth_stats.recoverable_error = error_recovered;
9244 bp->eth_stats.unrecoverable_error = error_unrecovered;
9256 static int bnx2x_close(struct net_device *dev);
9258 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
9259 * scheduled on a general queue in order to prevent a deadlock.
9261 static void bnx2x_sp_rtnl_task(struct work_struct *work)
9263 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
9267 if (!netif_running(bp->dev))
9270 /* if stop on error is defined no recovery flows should be executed */
9271 #ifdef BNX2X_STOP_ON_ERROR
9272 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined, so reset not done to allow debug dump.\n"
9273 "You will need to reboot when done\n");
9274 goto sp_rtnl_not_reset;
9277 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9279 * Clear all pending SP commands as we are going to reset the
9282 bp->sp_rtnl_state = 0;
9285 bnx2x_parity_recover(bp);
9290 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9292 * Clear all pending SP commands as we are going to reset the
9295 bp->sp_rtnl_state = 0;
9298 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9299 bnx2x_nic_load(bp, LOAD_NORMAL);
9303 #ifdef BNX2X_STOP_ON_ERROR
9306 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9307 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
9308 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9309 bnx2x_after_function_update(bp);
9311 * In case of fan failure we need to reset it if the "stop on error"
9312 * debug flag is set, since we are trying to prevent permanent overheating
9315 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
9316 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
9317 netif_device_detach(bp->dev);
9318 bnx2x_close(bp->dev);
9325 /* end of nic load/unload */
9327 static void bnx2x_period_task(struct work_struct *work)
9329 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9331 if (!netif_running(bp->dev))
9332 goto period_task_exit;
9334 if (CHIP_REV_IS_SLOW(bp)) {
9335 BNX2X_ERR("period task called on emulation, ignoring\n");
9336 goto period_task_exit;
9339 bnx2x_acquire_phy_lock(bp);
9341 * The barrier is needed to ensure the ordering between the writing to
9342 * bp->port.pmf in bnx2x_nic_load()/bnx2x_pmf_update() and the reading here. */
9347 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9349 /* Re-queue task in 1 sec */
9350 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9353 bnx2x_release_phy_lock(bp);
9359 * Init service functions
9362 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
9364 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9365 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9366 return base + (BP_ABS_FUNC(bp)) * stride;
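/* Added worked example (assuming the F0..F7 pretend registers are laid
 * out contiguously, which is what the stride computation above relies
 * on): with stride = F1 - F0, the register for absolute function n is
 * F0 + n * stride, so e.g. BP_ABS_FUNC(bp) == 2 resolves to the third
 * pretend register in the bank.
 */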
9369 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
9371 u32 reg = bnx2x_get_pretend_reg(bp);
9373 /* Flush all outstanding writes */
9376 /* Pretend to be function 0 */
9378 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
9380 /* From now we are in the "like-E1" mode */
9381 bnx2x_int_disable(bp);
9383 /* Flush all outstanding writes */
9386 /* Restore the original function */
9387 REG_WR(bp, reg, BP_ABS_FUNC(bp));
9391 static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
9394 bnx2x_int_disable(bp);
9396 bnx2x_undi_int_disable_e1h(bp);
9399 static void __devinit bnx2x_prev_unload_close_mac(struct bnx2x *bp)
9401 u32 val, base_addr, offset, mask, reset_reg;
9402 bool mac_stopped = false;
9403 u8 port = BP_PORT(bp);
9405 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
9407 if (!CHIP_IS_E3(bp)) {
9408 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9409 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9410 if ((mask & reset_reg) && val) {
9412 BNX2X_DEV_INFO("Disable bmac Rx\n");
9413 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9414 : NIG_REG_INGRESS_BMAC0_MEM;
9415 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9416 : BIGMAC_REGISTER_BMAC_CONTROL;
9419 * use rd/wr since we cannot use dmae. This is safe
9420 * since MCP won't access the bus due to the request
9421 * to unload, and no function on the path can be
9422 * loaded at this time.
9424 wb_data[0] = REG_RD(bp, base_addr + offset);
9425 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
9426 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9427 REG_WR(bp, base_addr + offset, wb_data[0]);
9428 REG_WR(bp, base_addr + offset + 0x4, wb_data[1]);
9431 BNX2X_DEV_INFO("Disable emac Rx\n");
9432 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4, 0);
9436 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9437 BNX2X_DEV_INFO("Disable xmac Rx\n");
9438 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9439 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9440 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9442 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9444 REG_WR(bp, base_addr + XMAC_REG_CTRL, 0);
9447 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9448 if (mask & reset_reg) {
9449 BNX2X_DEV_INFO("Disable umac Rx\n");
9450 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
9451 REG_WR(bp, base_addr + UMAC_REG_COMMAND_CONFIG, 0);
9461 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9462 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9463 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9464 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
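/* Added note: the UNDI producer word packs both ring producers into one
 * 32-bit value - the RCQ producer in bits [15:0] and the BD producer in
 * bits [31:16] - so e.g. BNX2X_PREV_UNDI_PROD(0x0003, 0x0005) yields
 * 0x00050003, and bnx2x_prev_unload_undi_inc() below bumps both halves
 * by `inc' in one register write.
 */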
9466 static void __devinit bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port,
9470 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9472 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9473 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9475 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9476 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9478 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9482 static int __devinit bnx2x_prev_mcp_done(struct bnx2x *bp)
9484 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9485 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9487 BNX2X_ERR("MCP response failure, aborting\n");
9494 static bool __devinit bnx2x_prev_is_path_marked(struct bnx2x *bp)
9496 struct bnx2x_prev_path_list *tmp_list;
9499 if (down_trylock(&bnx2x_prev_sem))
9502 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) {
9503 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9504 bp->pdev->bus->number == tmp_list->bus &&
9505 BP_PATH(bp) == tmp_list->path) {
9507 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9513 up(&bnx2x_prev_sem);
9518 static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp)
9520 struct bnx2x_prev_path_list *tmp_list;
9523 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9525 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9529 tmp_list->bus = bp->pdev->bus->number;
9530 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9531 tmp_list->path = BP_PATH(bp);
9533 rc = down_interruptible(&bnx2x_prev_sem);
9535 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
9538 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n",
9540 list_add(&tmp_list->list, &bnx2x_prev_list);
9541 up(&bnx2x_prev_sem);
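/* Added note: bnx2x_prev_list is a driver-global list, guarded by the
 * bnx2x_prev_sem semaphore and keyed by bus/slot/path. Once a path is
 * marked here, later probes on the same path skip the heavy "previous
 * unload" cleanup and only complete the MCP handshake.
 */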
9547 static int __devinit bnx2x_do_flr(struct bnx2x *bp)
9551 struct pci_dev *dev = bp->pdev;
9554 if (CHIP_IS_E1x(bp)) {
9555 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9559 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards supports FLR */
9560 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9561 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9566 /* Wait for the Transaction Pending bit to clear */
9567 for (i = 0; i < 4; i++) {
9568 if (i)
9569 msleep((1 << (i - 1)) * 100);
9571 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
9572 if (!(status & PCI_EXP_DEVSTA_TRPND))
9577 "transaction is not cleared; proceeding with reset anyway\n");
9581 BNX2X_DEV_INFO("Initiating FLR\n");
9582 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
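/* Added note: the pending-transaction wait above backs off exponentially -
 * iteration i (for i > 0) sleeps (1 << (i - 1)) * 100 ms, i.e. 100, 200
 * and 400 ms, roughly 700 ms in total before giving up and issuing the
 * FLR anyway.
 */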
9587 static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9591 BNX2X_DEV_INFO("Uncommon unload Flow\n");
9593 /* Test if previous unload process was already finished for this path */
9594 if (bnx2x_prev_is_path_marked(bp))
9595 return bnx2x_prev_mcp_done(bp);
9597 /* If function has FLR capabilities, and existing FW version matches
9598 * the one required, then FLR will be sufficient to clean any residue
9599 * left by previous driver
9601 rc = bnx2x_test_firmware_version(bp, false);
9604 /* fw version is good */
9605 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
9606 rc = bnx2x_do_flr(bp);
9610 /* FLR was performed */
9611 BNX2X_DEV_INFO("FLR successful\n");
9615 BNX2X_DEV_INFO("Could not FLR\n");
9617 /* Close the MCP request, return failure */
9618 rc = bnx2x_prev_mcp_done(bp);
9620 rc = BNX2X_PREV_WAIT_NEEDED;
9625 static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
9627 u32 reset_reg, tmp_reg = 0, rc;
9628 /* It is possible a previous function received 'common' answer,
9629 * but hasn't loaded yet, therefore creating a scenario of
9630 * multiple functions receiving 'common' on the same path.
9632 BNX2X_DEV_INFO("Common unload Flow\n");
9634 if (bnx2x_prev_is_path_marked(bp))
9635 return bnx2x_prev_mcp_done(bp);
9637 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9639 /* Reset should be performed after BRB is emptied */
9640 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
9641 u32 timer_count = 1000;
9642 bool prev_undi = false;
9644 /* Close the MAC Rx to prevent BRB from filling up */
9645 bnx2x_prev_unload_close_mac(bp);
9647 /* Check if the UNDI driver was previously loaded;
9648 * the UNDI driver initializes the CID offset for the normal bell to 0x7 */
9650 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9651 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
9652 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9653 if (tmp_reg == 0x7) {
9654 BNX2X_DEV_INFO("UNDI previously loaded\n");
9656 /* clear the UNDI indication */
9657 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9660 /* wait until BRB is empty */
9661 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9662 while (timer_count) {
9663 u32 prev_brb = tmp_reg;
9665 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9669 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
9671 /* reset timer as long as BRB actually gets emptied */
9672 if (prev_brb > tmp_reg)
9677 /* If UNDI resides in memory, manually increment it */
9679 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
9685 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
9689 /* No packets are in the pipeline, path is ready for reset */
9690 bnx2x_reset_common(bp);
9692 rc = bnx2x_prev_mark_path(bp);
9694 bnx2x_prev_mcp_done(bp);
9698 return bnx2x_prev_mcp_done(bp);
9701 /* previous driver DMAE transaction may have occurred when pre-boot stage ended
9702 * and boot began, or when kdump kernel was loaded. Either case would invalidate
9703 * the addresses of the transaction, resulting in the was-error bit being set
9704 * in the pci, causing all hw-to-host pcie transactions to time out. If this
9705 * happened we want to clear the interrupt which detected this from the pglueb
9706 * and the was-done bit. */
9708 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
9710 if (!CHIP_IS_E1x(bp)) {
9711 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
9712 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
9713 BNX2X_ERR("was-error bit was found to be set in pglueb upon startup - clearing\n");
9714 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
9720 static int __devinit bnx2x_prev_unload(struct bnx2x *bp)
9722 int time_counter = 10;
9723 u32 rc, fw, hw_lock_reg, hw_lock_val;
9724 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
9726 /* clear hw from errors which may have resulted from an interrupted
9727 * dmae transaction */
9729 bnx2x_prev_interrupted_dmae(bp);
9731 /* Release previously held locks */
9732 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
9733 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
9734 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
9736 hw_lock_val = (REG_RD(bp, hw_lock_reg));
9738 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
9739 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
9740 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
9741 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
9744 BNX2X_DEV_INFO("Release Previously held hw lock\n");
9745 REG_WR(bp, hw_lock_reg, 0xffffffff);
9747 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
9749 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
9750 BNX2X_DEV_INFO("Release previously held alr\n");
9751 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
9756 /* Lock MCP using an unload request */
9757 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9759 BNX2X_ERR("MCP response failure, aborting\n");
9764 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9765 rc = bnx2x_prev_unload_common(bp);
9769 /* non-common reply from MCP might require looping */
9770 rc = bnx2x_prev_unload_uncommon(bp);
9771 if (rc != BNX2X_PREV_WAIT_NEEDED)
9775 } while (--time_counter);
9777 if (!time_counter || rc) {
9778 BNX2X_ERR("Failed unloading previous driver, aborting\n");
9782 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
9787 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9789 u32 val, val2, val3, val4, id, boot_mode;
9792 /* Get the chip revision id and number. */
9793 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9794 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9795 id = ((val & 0xffff) << 16);
9796 val = REG_RD(bp, MISC_REG_CHIP_REV);
9797 id |= ((val & 0xf) << 12);
9798 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9799 id |= ((val & 0xff) << 4);
9800 val = REG_RD(bp, MISC_REG_BOND_ID);
9801 id |= (val & 0xf);
9802 bp->common.chip_id = id;
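/* Added illustrative composition (hypothetical field values): with chip
 * num 0x168e, rev 0x0, metal 0x00 and bond id 0x0, the assembled id is
 * (0x168e << 16) | (0x0 << 12) | (0x00 << 4) | 0x0 = 0x168e0000,
 * matching the num:16-31/rev:12-15/metal:4-11/bond_id:0-3 layout noted
 * above.
 */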
9804 /* force 57811 according to MISC register */
9805 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
9806 if (CHIP_IS_57810(bp))
9807 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
9808 (bp->common.chip_id & 0x0000FFFF);
9809 else if (CHIP_IS_57810_MF(bp))
9810 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
9811 (bp->common.chip_id & 0x0000FFFF);
9812 bp->common.chip_id |= 0x1;
9815 /* Set doorbell size */
9816 bp->db_size = (1 << BNX2X_DB_SHIFT);
9818 if (!CHIP_IS_E1x(bp)) {
9819 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
9821 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
9823 val = (val >> 1) & 1;
9824 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
9826 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
9829 if (CHIP_MODE_IS_4_PORT(bp))
9830 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
9832 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
9834 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
9835 bp->pfid = bp->pf_num; /* 0..7 */
9838 BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
9840 bp->link_params.chip_id = bp->common.chip_id;
9841 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9843 val = (REG_RD(bp, 0x2874) & 0x55);
9844 if ((bp->common.chip_id & 0x1) ||
9845 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9846 bp->flags |= ONE_PORT_FLAG;
9847 BNX2X_DEV_INFO("single port device\n");
9850 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9851 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
9852 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9853 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9854 bp->common.flash_size, bp->common.flash_size);
9856 bnx2x_init_shmem(bp);
9860 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
9861 MISC_REG_GENERIC_CR_1 :
9862 MISC_REG_GENERIC_CR_0));
9864 bp->link_params.shmem_base = bp->common.shmem_base;
9865 bp->link_params.shmem2_base = bp->common.shmem2_base;
9866 if (SHMEM2_RD(bp, size) >
9867 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
9868 bp->link_params.lfa_base =
9869 REG_RD(bp, bp->common.shmem2_base +
9870 (u32)offsetof(struct shmem2_region,
9871 lfa_host_addr[BP_PORT(bp)]));
9873 bp->link_params.lfa_base = 0;
9874 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9875 bp->common.shmem_base, bp->common.shmem2_base);
9877 if (!bp->common.shmem_base) {
9878 BNX2X_DEV_INFO("MCP not active\n");
9879 bp->flags |= NO_MCP_FLAG;
9883 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9884 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9886 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9887 SHARED_HW_CFG_LED_MODE_MASK) >>
9888 SHARED_HW_CFG_LED_MODE_SHIFT);
9890 bp->link_params.feature_config_flags = 0;
9891 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9892 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9893 bp->link_params.feature_config_flags |=
9894 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9896 bp->link_params.feature_config_flags &=
9897 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9899 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9900 bp->common.bc_ver = val;
9901 BNX2X_DEV_INFO("bc_ver %X\n", val);
9902 if (val < BNX2X_BC_VER) {
9903 /* for now only warn;
9904 * later we might need to enforce this */
9905 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
9908 bp->link_params.feature_config_flags |=
9909 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
9910 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9912 bp->link_params.feature_config_flags |=
9913 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
9914 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
9915 bp->link_params.feature_config_flags |=
9916 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
9917 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
9918 bp->link_params.feature_config_flags |=
9919 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
9920 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
9922 bp->link_params.feature_config_flags |=
9923 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
9924 FEATURE_CONFIG_MT_SUPPORT : 0;
9926 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
9927 BC_SUPPORTS_PFC_STATS : 0;
9929 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
9930 BC_SUPPORTS_FCOE_FEATURES : 0;
9932 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
9933 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
9934 boot_mode = SHMEM_RD(bp,
9935 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
9936 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
9937 switch (boot_mode) {
9938 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
9939 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
9941 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
9942 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
9944 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
9945 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
9947 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
9948 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
9952 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9953 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9955 BNX2X_DEV_INFO("%sWoL capable\n",
9956 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9958 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9959 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9960 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9961 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9963 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9964 val, val2, val3, val4);
9967 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
9968 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
9970 static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
9972 int pfid = BP_FUNC(bp);
9975 u8 fid, igu_sb_cnt = 0;
9977 bp->igu_base_sb = 0xff;
9978 if (CHIP_INT_MODE_IS_BC(bp)) {
9980 igu_sb_cnt = bp->igu_sb_cnt;
9981 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
9984 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
9985 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
9990 /* IGU in normal mode - read CAM */
9991 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
9993 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
9994 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
9997 if ((fid & IGU_FID_ENCODE_IS_PF)) {
9998 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10000 if (IGU_VEC(val) == 0)
10001 /* default status block */
10002 bp->igu_dsb_id = igu_sb_id;
10004 if (bp->igu_base_sb == 0xff)
10005 bp->igu_base_sb = igu_sb_id;
10011 #ifdef CONFIG_PCI_MSI
10012 /* Due to new PF resource allocation by MFW T7.4 and above, it's
10013 * possible that the number of CAM entries will not equal the value
10014 * advertised in PCI.
10015 * The driver should use the minimum of the two as the actual status
10016 * block count. */
10018 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10021 if (igu_sb_cnt == 0)
10022 BNX2X_ERR("CAM configuration error\n");
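/* Added note: the CAM scan above walks the IGU_REG_MAPPING_MEMORY
 * entries, skips vectors owned by other PFs, records vector 0 as the
 * default status block and the first remaining match as igu_base_sb,
 * counting the rest in igu_sb_cnt - which is then clamped to what PCI
 * (the MSI-X table) advertised, per the MFW T7.4 note above.
 */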
10025 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
10028 int cfg_size = 0, idx, port = BP_PORT(bp);
10030 /* Aggregation of supported attributes of all external phys */
10031 bp->port.supported[0] = 0;
10032 bp->port.supported[1] = 0;
10033 switch (bp->link_params.num_phys) {
10035 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10039 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10043 if (bp->link_params.multi_phy_config &
10044 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10045 bp->port.supported[1] =
10046 bp->link_params.phy[EXT_PHY1].supported;
10047 bp->port.supported[0] =
10048 bp->link_params.phy[EXT_PHY2].supported;
10050 bp->port.supported[0] =
10051 bp->link_params.phy[EXT_PHY1].supported;
10052 bp->port.supported[1] =
10053 bp->link_params.phy[EXT_PHY2].supported;
10059 if (!(bp->port.supported[0] || bp->port.supported[1])) {
10060 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10062 dev_info.port_hw_config[port].external_phy_config),
10064 dev_info.port_hw_config[port].external_phy_config2));
10068 if (CHIP_IS_E3(bp))
10069 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10071 switch (switch_cfg) {
10072 case SWITCH_CFG_1G:
10073 bp->port.phy_addr = REG_RD(
10074 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10076 case SWITCH_CFG_10G:
10077 bp->port.phy_addr = REG_RD(
10078 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10081 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10082 bp->port.link_config[0]);
10086 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
10087 /* mask what we support according to speed_cap_mask per configuration */
10088 for (idx = 0; idx < cfg_size; idx++) {
10089 if (!(bp->link_params.speed_cap_mask[idx] &
10090 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
10091 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
10093 if (!(bp->link_params.speed_cap_mask[idx] &
10094 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
10095 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
10097 if (!(bp->link_params.speed_cap_mask[idx] &
10098 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
10099 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
10101 if (!(bp->link_params.speed_cap_mask[idx] &
10102 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
10103 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
10105 if (!(bp->link_params.speed_cap_mask[idx] &
10106 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
10107 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
10108 SUPPORTED_1000baseT_Full);
10110 if (!(bp->link_params.speed_cap_mask[idx] &
10111 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
10112 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
10114 if (!(bp->link_params.speed_cap_mask[idx] &
10115 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10116 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10120 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10121 bp->port.supported[1]);
10124 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
10126 u32 link_config, idx, cfg_size = 0;
10127 bp->port.advertising[0] = 0;
10128 bp->port.advertising[1] = 0;
10129 switch (bp->link_params.num_phys) {
10138 for (idx = 0; idx < cfg_size; idx++) {
10139 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10140 link_config = bp->port.link_config[idx];
10141 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
10142 case PORT_FEATURE_LINK_SPEED_AUTO:
10143 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10144 bp->link_params.req_line_speed[idx] =
10146 bp->port.advertising[idx] |=
10147 bp->port.supported[idx];
10148 if (bp->link_params.phy[EXT_PHY1].type ==
10149 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10150 bp->port.advertising[idx] |=
10151 (SUPPORTED_100baseT_Half |
10152 SUPPORTED_100baseT_Full);
10154 /* force 10G, no AN */
10155 bp->link_params.req_line_speed[idx] =
10157 bp->port.advertising[idx] |=
10158 (ADVERTISED_10000baseT_Full |
10164 case PORT_FEATURE_LINK_SPEED_10M_FULL:
10165 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10166 bp->link_params.req_line_speed[idx] =
10168 bp->port.advertising[idx] |=
10169 (ADVERTISED_10baseT_Full |
10172 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10174 bp->link_params.speed_cap_mask[idx]);
10179 case PORT_FEATURE_LINK_SPEED_10M_HALF:
10180 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10181 bp->link_params.req_line_speed[idx] =
10183 bp->link_params.req_duplex[idx] =
10185 bp->port.advertising[idx] |=
10186 (ADVERTISED_10baseT_Half |
10189 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10191 bp->link_params.speed_cap_mask[idx]);
10196 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10197 if (bp->port.supported[idx] &
10198 SUPPORTED_100baseT_Full) {
10199 bp->link_params.req_line_speed[idx] =
10201 bp->port.advertising[idx] |=
10202 (ADVERTISED_100baseT_Full |
10205 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10207 bp->link_params.speed_cap_mask[idx]);
10212 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10213 if (bp->port.supported[idx] &
10214 SUPPORTED_100baseT_Half) {
10215 bp->link_params.req_line_speed[idx] =
10217 bp->link_params.req_duplex[idx] =
10219 bp->port.advertising[idx] |=
10220 (ADVERTISED_100baseT_Half |
10223 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10225 bp->link_params.speed_cap_mask[idx]);
10230 case PORT_FEATURE_LINK_SPEED_1G:
10231 if (bp->port.supported[idx] &
10232 SUPPORTED_1000baseT_Full) {
10233 bp->link_params.req_line_speed[idx] =
10235 bp->port.advertising[idx] |=
10236 (ADVERTISED_1000baseT_Full |
10239 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10241 bp->link_params.speed_cap_mask[idx]);
10246 case PORT_FEATURE_LINK_SPEED_2_5G:
10247 if (bp->port.supported[idx] &
10248 SUPPORTED_2500baseX_Full) {
10249 bp->link_params.req_line_speed[idx] =
10251 bp->port.advertising[idx] |=
10252 (ADVERTISED_2500baseX_Full |
10255 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10257 bp->link_params.speed_cap_mask[idx]);
10262 case PORT_FEATURE_LINK_SPEED_10G_CX4:
10263 if (bp->port.supported[idx] &
10264 SUPPORTED_10000baseT_Full) {
10265 bp->link_params.req_line_speed[idx] =
10267 bp->port.advertising[idx] |=
10268 (ADVERTISED_10000baseT_Full |
10271 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
10273 bp->link_params.speed_cap_mask[idx]);
10277 case PORT_FEATURE_LINK_SPEED_20G:
10278 bp->link_params.req_line_speed[idx] = SPEED_20000;
10282 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
10284 bp->link_params.req_line_speed[idx] =
10286 bp->port.advertising[idx] =
10287 bp->port.supported[idx];
10291 bp->link_params.req_flow_ctrl[idx] = (link_config &
10292 PORT_FEATURE_FLOW_CONTROL_MASK);
10293 if ((bp->link_params.req_flow_ctrl[idx] ==
10294 BNX2X_FLOW_CTRL_AUTO) &&
10295 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
10296 bp->link_params.req_flow_ctrl[idx] =
10297 BNX2X_FLOW_CTRL_NONE;
10300 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10301 bp->link_params.req_line_speed[idx],
10302 bp->link_params.req_duplex[idx],
10303 bp->link_params.req_flow_ctrl[idx],
10304 bp->port.advertising[idx]);
10308 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10310 mac_hi = cpu_to_be16(mac_hi);
10311 mac_lo = cpu_to_be32(mac_lo);
10312 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
10313 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
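/* Added illustrative example (hypothetical address): for mac_hi = 0x0010
 * and mac_lo = 0x18421855, the big-endian conversions above fill the
 * buffer with 00:10:18:42:18:55 - the upper two bytes of the MAC come
 * from mac_hi and the lower four from mac_lo, as they are stored in
 * shmem.
 */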
10316 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
10318 int port = BP_PORT(bp);
10320 u32 ext_phy_type, ext_phy_config, eee_mode;
10322 bp->link_params.bp = bp;
10323 bp->link_params.port = port;
10325 bp->link_params.lane_config =
10326 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
10328 bp->link_params.speed_cap_mask[0] =
10330 dev_info.port_hw_config[port].speed_capability_mask);
10331 bp->link_params.speed_cap_mask[1] =
10333 dev_info.port_hw_config[port].speed_capability_mask2);
10334 bp->port.link_config[0] =
10335 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10337 bp->port.link_config[1] =
10338 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
10340 bp->link_params.multi_phy_config =
10341 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
10342 /* If the device is capable of WoL, set the default state according
10343 * to the HW */
10345 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
10346 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10347 (config & PORT_FEATURE_WOL_ENABLED));
10349 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
10350 bp->link_params.lane_config,
10351 bp->link_params.speed_cap_mask[0],
10352 bp->port.link_config[0]);
10354 bp->link_params.switch_cfg = (bp->port.link_config[0] &
10355 PORT_FEATURE_CONNECTED_SWITCH_MASK);
10356 bnx2x_phy_probe(&bp->link_params);
10357 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
10359 bnx2x_link_settings_requested(bp);
10362 * If connected directly, work with the internal PHY, otherwise, work
10363 * with the external PHY
10367 dev_info.port_hw_config[port].external_phy_config);
10368 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
10369 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
10370 bp->mdio.prtad = bp->port.phy_addr;
10372 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10373 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10375 XGXS_EXT_PHY_ADDR(ext_phy_config);
10377 /* Configure link feature according to nvram value */
10378 eee_mode = (((SHMEM_RD(bp, dev_info.
10379 port_feature_config[port].eee_power_mode)) &
10380 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10381 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10382 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10383 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10384 EEE_MODE_ENABLE_LPI |
10385 EEE_MODE_OUTPUT_TIME;
10387 bp->link_params.eee_mode = 0;
10391 void bnx2x_get_iscsi_info(struct bnx2x *bp)
10393 u32 no_flags = NO_ISCSI_FLAG;
10394 int port = BP_PORT(bp);
10395 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10396 drv_lic_key[port].max_iscsi_conn);
10398 if (!CNIC_SUPPORT(bp)) {
10399 bp->flags |= no_flags;
10403 /* Get the number of maximum allowed iSCSI connections */
10404 bp->cnic_eth_dev.max_iscsi_conn =
10405 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10406 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10408 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10409 bp->cnic_eth_dev.max_iscsi_conn);
10412 * If maximum allowed number of connections is zero -
10413 * disable the feature.
10415 if (!bp->cnic_eth_dev.max_iscsi_conn)
10416 bp->flags |= no_flags;
10420 static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10423 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10424 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10425 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10426 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10429 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10430 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10431 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10432 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10434 static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
10436 int port = BP_PORT(bp);
10437 int func = BP_ABS_FUNC(bp);
10438 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10439 drv_lic_key[port].max_fcoe_conn);
10441 if (!CNIC_SUPPORT(bp)) {
10442 bp->flags |= NO_FCOE_FLAG;
10446 /* Get the number of maximum allowed FCoE connections */
10447 bp->cnic_eth_dev.max_fcoe_conn =
10448 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10449 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10451 /* Read the WWN: */
10454 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10456 dev_info.port_hw_config[port].
10457 fcoe_wwn_port_name_upper);
10458 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10460 dev_info.port_hw_config[port].
10461 fcoe_wwn_port_name_lower);
10464 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10466 dev_info.port_hw_config[port].
10467 fcoe_wwn_node_name_upper);
10468 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10470 dev_info.port_hw_config[port].
10471 fcoe_wwn_node_name_lower);
10472 } else if (!IS_MF_SD(bp)) {
10474 * Read the WWN info only if the FCoE feature is enabled for
10475 * this function.
10477 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
10478 bnx2x_get_ext_wwn_info(bp, func);
10480 } else if (IS_MF_FCOE_SD(bp))
10481 bnx2x_get_ext_wwn_info(bp, func);
10483 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10486 * If maximum allowed number of connections is zero -
10487 * disable the feature.
10489 if (!bp->cnic_eth_dev.max_fcoe_conn)
10490 bp->flags |= NO_FCOE_FLAG;
10493 static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
10496 * iSCSI may be dynamically disabled, but reading the
10497 * info here lets the driver decrease its memory usage
10498 * if the feature is disabled for good
10500 bnx2x_get_iscsi_info(bp);
10501 bnx2x_get_fcoe_info(bp);
10504 static void __devinit bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
10507 int func = BP_ABS_FUNC(bp);
10508 int port = BP_PORT(bp);
10509 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10510 u8 *fip_mac = bp->fip_mac;
10513 /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
10514 * FCoE MAC then the appropriate feature should be disabled.
10515 * In non-SD mode the feature configuration comes from struct
10516 * func_ext_config. */
10518 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
10519 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
10520 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
10521 val2 = MF_CFG_RD(bp, func_ext_config[func].
10522 iscsi_mac_addr_upper);
10523 val = MF_CFG_RD(bp, func_ext_config[func].
10524 iscsi_mac_addr_lower);
10525 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10527 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10529 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10532 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
10533 val2 = MF_CFG_RD(bp, func_ext_config[func].
10534 fcoe_mac_addr_upper);
10535 val = MF_CFG_RD(bp, func_ext_config[func].
10536 fcoe_mac_addr_lower);
10537 bnx2x_set_mac_buf(fip_mac, val, val2);
10539 ("Read FCoE L2 MAC: %pM\n", fip_mac);
10541 bp->flags |= NO_FCOE_FLAG;
10544 bp->mf_ext_config = cfg;
10546 } else { /* SD MODE */
10547 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
10548 /* use primary mac as iscsi mac */
10549 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
10551 BNX2X_DEV_INFO("SD ISCSI MODE\n");
10553 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10554 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
10555 /* use primary mac as fip mac */
10556 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
10557 BNX2X_DEV_INFO("SD FCoE MODE\n");
10559 ("Read FIP MAC: %pM\n", fip_mac);
10563 if (IS_MF_STORAGE_SD(bp))
10564 /* Zero primary MAC configuration */
10565 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10567 if (IS_MF_FCOE_AFEX(bp))
10568 /* use FIP MAC as primary MAC */
10569 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10572 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10574 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10576 bnx2x_set_mac_buf(iscsi_mac, val, val2);
10578 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10579 fcoe_fip_mac_upper);
10580 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
10581 fcoe_fip_mac_lower);
10582 bnx2x_set_mac_buf(fip_mac, val, val2);
10585 /* Disable iSCSI OOO if MAC configuration is invalid. */
10586 if (!is_valid_ether_addr(iscsi_mac)) {
10587 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
10588 memset(iscsi_mac, 0, ETH_ALEN);
10591 /* Disable FCoE if MAC configuration is invalid. */
10592 if (!is_valid_ether_addr(fip_mac)) {
10593 bp->flags |= NO_FCOE_FLAG;
10594 memset(bp->fip_mac, 0, ETH_ALEN);
10598 static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
10601 int func = BP_ABS_FUNC(bp);
10602 int port = BP_PORT(bp);
10604 /* Zero primary MAC configuration */
10605 memset(bp->dev->dev_addr, 0, ETH_ALEN);
10607 if (BP_NOMCP(bp)) {
10608 BNX2X_ERROR("warning: random MAC workaround active\n");
10609 eth_hw_addr_random(bp->dev);
10610 } else if (IS_MF(bp)) {
10611 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
10612 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
10613 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
10614 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
10615 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10617 if (CNIC_SUPPORT(bp))
10618 bnx2x_get_cnic_mac_hwinfo(bp);
10620 /* in SF read MACs from port configuration */
10621 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
10622 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
10623 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
10625 if (CNIC_SUPPORT(bp))
10626 bnx2x_get_cnic_mac_hwinfo(bp);
10629 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
10630 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
10632 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
10633 dev_err(&bp->pdev->dev,
10634 "bad Ethernet MAC address configuration: %pM\n"
10635 "change it manually before bringing up the appropriate network interface\n",
10636 bp->dev->dev_addr);
10641 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10643 int /*abs*/func = BP_ABS_FUNC(bp);
10648 bnx2x_get_common_hwinfo(bp);
10651 * initialize IGU parameters
10653 if (CHIP_IS_E1x(bp)) {
10654 bp->common.int_block = INT_BLOCK_HC;
10656 bp->igu_dsb_id = DEF_SB_IGU_ID;
10657 bp->igu_base_sb = 0;
10659 bp->common.int_block = INT_BLOCK_IGU;
10661 /* do not allow device reset during IGU info processing */
10662 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10664 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
10666 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10669 BNX2X_DEV_INFO("FORCING Normal Mode\n");
10671 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
10672 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
10673 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
10675 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10677 usleep_range(1000, 1000);
10680 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
10681 dev_err(&bp->pdev->dev,
10682 "FORCING Normal Mode failed!!!\n");
10687 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
10688 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
10689 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
10691 BNX2X_DEV_INFO("IGU Normal Mode\n");
10693 bnx2x_get_igu_cam_info(bp);
10695 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
10699 * Set the base FW non-default (fast path) status block id; this value is
10700 * used to initialize the fw_sb_id saved on the fp/queue structure to
10701 * determine the id used by the FW.
10703 if (CHIP_IS_E1x(bp))
10704 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
10706 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
10707 * the same queue are indicated on the same IGU SB). So we prefer
10708 * FW and IGU SBs to be the same value.
10710 bp->base_fw_ndsb = bp->igu_base_sb;
10712 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
10713 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
10714 bp->igu_sb_cnt, bp->base_fw_ndsb);
10717 * Initialize MF configuration
10724 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
10725 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
10726 bp->common.shmem2_base, SHMEM2_RD(bp, size),
10727 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
10729 if (SHMEM2_HAS(bp, mf_cfg_addr))
10730 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
10732 bp->common.mf_cfg_base = bp->common.shmem_base +
10733 offsetof(struct shmem_region, func_mb) +
10734 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
10736 * get mf configuration:
10737 * 1. existence of MF configuration
10738 * 2. MAC address must be legal (check only upper bytes)
10739 * for Switch-Independent mode;
10740 * OVLAN must be legal for Switch-Dependent mode
10741 * 3. SF_MODE configures specific MF mode
10743 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
10744 /* get mf configuration */
10746 dev_info.shared_feature_config.config);
10747 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
10750 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
10751 val = MF_CFG_RD(bp, func_mf_config[func].
10753 /* check for legal mac (upper bytes)*/
10754 if (val != 0xffff) {
10755 bp->mf_mode = MULTI_FUNCTION_SI;
10756 bp->mf_config[vn] = MF_CFG_RD(bp,
10757 func_mf_config[func].config);
10759 BNX2X_DEV_INFO("illegal MAC address for SI\n");
10761 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
10762 if ((!CHIP_IS_E1x(bp)) &&
10763 (MF_CFG_RD(bp, func_mf_config[func].
10764 mac_upper) != 0xffff) &&
10766 afex_driver_support))) {
10767 bp->mf_mode = MULTI_FUNCTION_AFEX;
10768 bp->mf_config[vn] = MF_CFG_RD(bp,
10769 func_mf_config[func].config);
10771 BNX2X_DEV_INFO("can not configure afex mode\n");
10774 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
10775 /* get OV configuration */
10776 val = MF_CFG_RD(bp,
10777 func_mf_config[FUNC_0].e1hov_tag);
10778 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
10780 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
10781 bp->mf_mode = MULTI_FUNCTION_SD;
10782 bp->mf_config[vn] = MF_CFG_RD(bp,
10783 func_mf_config[func].config);
10785 BNX2X_DEV_INFO("illegal OV for SD\n");
10788 /* Unknown configuration: reset mf_config */
10789 bp->mf_config[vn] = 0;
10790 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
10794 BNX2X_DEV_INFO("%s function mode\n",
10795 IS_MF(bp) ? "multi" : "single");
10797 switch (bp->mf_mode) {
10798 case MULTI_FUNCTION_SD:
10799 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
10800 FUNC_MF_CFG_E1HOV_TAG_MASK;
10801 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
10803 bp->path_has_ovlan = true;
10805 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
10806 func, bp->mf_ov, bp->mf_ov);
10808 dev_err(&bp->pdev->dev,
10809 "No valid MF OV for func %d, aborting\n",
10814 case MULTI_FUNCTION_AFEX:
10815 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
10817 case MULTI_FUNCTION_SI:
10818 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
10823 dev_err(&bp->pdev->dev,
10824 "VN %d is in a single function mode, aborting\n",
10831 /* check if the other port on the path needs ovlan:
10832 * since the MF configuration is shared between ports, the only
10833 * possible mixed modes are
10834 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
10836 if (CHIP_MODE_IS_4_PORT(bp) &&
10837 !bp->path_has_ovlan &&
10839 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
10840 u8 other_port = !BP_PORT(bp);
10841 u8 other_func = BP_PATH(bp) + 2*other_port;
10842 val = MF_CFG_RD(bp,
10843 func_mf_config[other_func].e1hov_tag);
10844 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
10845 bp->path_has_ovlan = true;
10849 /* adjust igu_sb_cnt to MF for E1x */
10850 if (CHIP_IS_E1x(bp) && IS_MF(bp))
10851 bp->igu_sb_cnt /= E1HVN_MAX;
10854 bnx2x_get_port_hwinfo(bp);
10856 /* Get MAC addresses */
10857 bnx2x_get_mac_hwinfo(bp);
10859 bnx2x_get_cnic_info(bp);
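/* Parse the PCI VPD image: locate the read-only section and, when the
 * manufacturer ID matches the Dell vendor ID, copy the VENDOR0 info
 * field into bp->fw_ver (our reading of the code; the VPD layout
 * itself is board-specific).
 */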
10864 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
10866 int cnt, i, block_end, rodi;
10867 char vpd_start[BNX2X_VPD_LEN+1];
10868 char str_id_reg[VENDOR_ID_LEN+1];
10869 char str_id_cap[VENDOR_ID_LEN+1];
10871 char *vpd_extended_data = NULL;
10874 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
10875 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
10877 if (cnt < BNX2X_VPD_LEN)
10878 goto out_not_found;
10880 /* The VPD RO tag should be the first tag after the identifier string,
10881 * hence we should be able to find it in the first BNX2X_VPD_LEN chars
10883 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
10884 PCI_VPD_LRDT_RO_DATA);
10886 goto out_not_found;
10888 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
10889 pci_vpd_lrdt_size(&vpd_start[i]);
10891 i += PCI_VPD_LRDT_TAG_SIZE;
10893 if (block_end > BNX2X_VPD_LEN) {
10894 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
10895 if (vpd_extended_data == NULL)
10896 goto out_not_found;
10898 /* read rest of vpd image into vpd_extended_data */
10899 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
10900 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
10901 block_end - BNX2X_VPD_LEN,
10902 vpd_extended_data + BNX2X_VPD_LEN);
10903 if (cnt < (block_end - BNX2X_VPD_LEN))
10904 goto out_not_found;
10905 vpd_data = vpd_extended_data;
10907 vpd_data = vpd_start;
10909 /* now vpd_data holds full vpd content in both cases */
10911 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
10912 PCI_VPD_RO_KEYWORD_MFR_ID);
10914 goto out_not_found;
10916 len = pci_vpd_info_field_size(&vpd_data[rodi]);
10918 if (len != VENDOR_ID_LEN)
10919 goto out_not_found;
10921 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
10923 /* vendor specific info */
10924 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
10925 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
10926 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
10927 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
10929 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
10930 PCI_VPD_RO_KEYWORD_VENDOR0);
10932 len = pci_vpd_info_field_size(&vpd_data[rodi]);
10934 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
10936 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
10937 memcpy(bp->fw_ver, &vpd_data[rodi], len);
10938 bp->fw_ver[len] = ' ';
10941 kfree(vpd_extended_data);
10945 kfree(vpd_extended_data);
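/* Build the INIT_MODE_FLAGS bitmap consumed by the init code: board
 * type (ASIC/FPGA/emulation), 2- vs 4-port mode, chip family and
 * revision (E2/E3, A0/B0), multi-function mode and host endianness.
 */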
10949 static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
10953 if (CHIP_REV_IS_FPGA(bp))
10954 SET_FLAGS(flags, MODE_FPGA);
10955 else if (CHIP_REV_IS_EMUL(bp))
10956 SET_FLAGS(flags, MODE_EMUL);
10958 SET_FLAGS(flags, MODE_ASIC);
10960 if (CHIP_MODE_IS_4_PORT(bp))
10961 SET_FLAGS(flags, MODE_PORT4);
10963 SET_FLAGS(flags, MODE_PORT2);
10965 if (CHIP_IS_E2(bp))
10966 SET_FLAGS(flags, MODE_E2);
10967 else if (CHIP_IS_E3(bp)) {
10968 SET_FLAGS(flags, MODE_E3);
10969 if (CHIP_REV(bp) == CHIP_REV_Ax)
10970 SET_FLAGS(flags, MODE_E3_A0);
10971 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
10972 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
10976 SET_FLAGS(flags, MODE_MF);
10977 switch (bp->mf_mode) {
10978 case MULTI_FUNCTION_SD:
10979 SET_FLAGS(flags, MODE_MF_SD);
10981 case MULTI_FUNCTION_SI:
10982 SET_FLAGS(flags, MODE_MF_SI);
10984 case MULTI_FUNCTION_AFEX:
10985 SET_FLAGS(flags, MODE_MF_AFEX);
10989 SET_FLAGS(flags, MODE_SF);
10991 #if defined(__LITTLE_ENDIAN)
10992 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
10993 #else /*(__BIG_ENDIAN)*/
10994 SET_FLAGS(flags, MODE_BIG_ENDIAN);
10996 INIT_MODE_FLAGS(bp) = flags;
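/* One-time driver state initialization on probe: locks, slow-path
 * work items, HW info, TPA and coalescing defaults, the periodic
 * timer and the minimal MSI-X vector count.
 */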
10999 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
11004 mutex_init(&bp->port.phy_mutex);
11005 mutex_init(&bp->fw_mb_mutex);
11006 spin_lock_init(&bp->stats_lock);
11009 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11010 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11011 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11012 rc = bnx2x_get_hwinfo(bp);
11016 bnx2x_set_modes_bitmap(bp);
11018 rc = bnx2x_alloc_mem_bp(bp);
11022 bnx2x_read_fwinfo(bp);
11024 func = BP_FUNC(bp);
11026 /* need to reset chip if undi was active */
11027 if (!BP_NOMCP(bp)) {
11030 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11031 DRV_MSG_SEQ_NUMBER_MASK;
11032 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11034 bnx2x_prev_unload(bp);
11038 if (CHIP_REV_IS_FPGA(bp))
11039 dev_err(&bp->pdev->dev, "FPGA detected\n");
11041 if (BP_NOMCP(bp) && (func == 0))
11042 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
11044 bp->disable_tpa = disable_tpa;
11045 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
11047 /* Set TPA flags */
11048 if (bp->disable_tpa) {
11049 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11050 bp->dev->features &= ~NETIF_F_LRO;
11052 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11053 bp->dev->features |= NETIF_F_LRO;
11056 if (CHIP_IS_E1(bp))
11057 bp->dropless_fc = 0;
11059 bp->dropless_fc = dropless_fc;
11063 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
11065 /* make sure that the numbers are in the right granularity */
11066 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11067 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
11069 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
11071 init_timer(&bp->timer);
11072 bp->timer.expires = jiffies + bp->current_interval;
11073 bp->timer.data = (unsigned long) bp;
11074 bp->timer.function = bnx2x_timer;
11076 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11077 bnx2x_dcbx_init_params(bp);
11079 if (CHIP_IS_E1x(bp))
11080 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11082 bp->cnic_base_cl_id = FP_SB_MAX_E2;
11084 /* multiple tx priority */
11085 if (CHIP_IS_E1x(bp))
11086 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
11087 if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
11088 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
11089 if (CHIP_IS_E3B0(bp))
11090 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
11092 /* We need at least one default status block for slow-path events,
11093 * a second status block for the L2 queue, and a third status block
11094 * for CNIC if supported.
11096 if (CNIC_SUPPORT(bp))
11097 bp->min_msix_vec_cnt = 3;
11099 bp->min_msix_vec_cnt = 2;
11100 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11106 /****************************************************************************
11107 * General service functions
11108 ****************************************************************************/
11111 * net_device service functions
11114 /* called with rtnl_lock */
11115 static int bnx2x_open(struct net_device *dev)
11117 struct bnx2x *bp = netdev_priv(dev);
11118 bool global = false;
11119 int other_engine = BP_PATH(bp) ? 0 : 1;
11120 bool other_load_status, load_status;
11122 bp->stats_init = true;
11124 netif_carrier_off(dev);
11126 bnx2x_set_power_state(bp, PCI_D0);
11128 other_load_status = bnx2x_get_load_status(bp, other_engine);
11129 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11132 * If a parity error occurred during the unload, then attentions
11133 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
11134 * want the first function loaded on the current engine to
11135 * complete the recovery.
11137 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11138 bnx2x_chk_parity_attn(bp, &global, true))
11141 * If there are attentions and they are in global
11142 * blocks, set the GLOBAL_RESET bit regardless of whether
11143 * it will be this function that completes the
11144 * recovery or not.
11147 bnx2x_set_reset_global(bp);
11150 * Only the first function on the current engine should
11151 * try to recover in open. In case of attentions in
11152 * global blocks only the first in the chip should try
11153 * to recover.
11155 if ((!load_status &&
11156 (!global || !other_load_status)) &&
11157 bnx2x_trylock_leader_lock(bp) &&
11158 !bnx2x_leader_reset(bp)) {
11159 netdev_info(bp->dev, "Recovered in open\n");
11163 /* recovery has failed... */
11164 bnx2x_set_power_state(bp, PCI_D3hot);
11165 bp->recovery_state = BNX2X_RECOVERY_FAILED;
11167 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11168 "If you still see this message after a few retries then power cycle is required.\n");
11173 bp->recovery_state = BNX2X_RECOVERY_DONE;
11174 return bnx2x_nic_load(bp, LOAD_OPEN);
11177 /* called with rtnl_lock */
11178 static int bnx2x_close(struct net_device *dev)
11180 struct bnx2x *bp = netdev_priv(dev);
11182 /* Unload the driver, release IRQs */
11183 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11186 bnx2x_set_power_state(bp, PCI_D3hot);
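/* Translate the netdev multicast list into the mcast_list expected by
 * the ramrod parameters; the array allocated here is presumably freed
 * by bnx2x_free_mcast_macs_list() once the command completes.
 */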
11191 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11192 struct bnx2x_mcast_ramrod_params *p)
11194 int mc_count = netdev_mc_count(bp->dev);
11195 struct bnx2x_mcast_list_elem *mc_mac =
11196 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
11197 struct netdev_hw_addr *ha;
11202 INIT_LIST_HEAD(&p->mcast_list);
11204 netdev_for_each_mc_addr(ha, bp->dev) {
11205 mc_mac->mac = bnx2x_mc_addr(ha);
11206 list_add_tail(&mc_mac->link, &p->mcast_list);
11210 p->mcast_list_len = mc_count;
11215 static void bnx2x_free_mcast_macs_list(
11216 struct bnx2x_mcast_ramrod_params *p)
11218 struct bnx2x_mcast_list_elem *mc_mac =
11219 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11227 * bnx2x_set_uc_list - configure a new unicast MAC list.
11229 * @bp: driver handle
11231 * We will use zero (0) as a MAC type for these MACs.
11233 static int bnx2x_set_uc_list(struct bnx2x *bp)
11236 struct net_device *dev = bp->dev;
11237 struct netdev_hw_addr *ha;
11238 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11239 unsigned long ramrod_flags = 0;
11241 /* First, schedule a cleanup of the old configuration */
11242 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11244 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11248 netdev_for_each_uc_addr(ha, dev) {
11249 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11250 BNX2X_UC_LIST_MAC, &ramrod_flags);
11251 if (rc == -EEXIST) {
11253 "Failed to schedule ADD operations: %d\n", rc);
11254 /* do not treat adding same MAC as error */
11257 } else if (rc < 0) {
11259 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11265 /* Execute the pending commands */
11266 __set_bit(RAMROD_CONT, &ramrod_flags);
11267 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11268 BNX2X_UC_LIST_MAC, &ramrod_flags);
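/* Reconfigure multicast filtering: first delete the old configuration,
 * then, if the netdev carries multicast addresses, build a new list
 * and issue a BNX2X_MCAST_CMD_ADD command.
 */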
11271 static int bnx2x_set_mc_list(struct bnx2x *bp)
11273 struct net_device *dev = bp->dev;
11274 struct bnx2x_mcast_ramrod_params rparam = {NULL};
11277 rparam.mcast_obj = &bp->mcast_obj;
11279 /* first, clear all configured multicast MACs */
11280 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11282 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
11286 /* then, configure the new MAC list */
11287 if (netdev_mc_count(dev)) {
11288 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11290 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11295 /* Now add the new MACs */
11296 rc = bnx2x_config_mcast(bp, &rparam,
11297 BNX2X_MCAST_CMD_ADD);
11299 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11302 bnx2x_free_mcast_macs_list(&rparam);
11309 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
11310 void bnx2x_set_rx_mode(struct net_device *dev)
11312 struct bnx2x *bp = netdev_priv(dev);
11313 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11315 if (bp->state != BNX2X_STATE_OPEN) {
11316 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11320 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
11322 if (dev->flags & IFF_PROMISC)
11323 rx_mode = BNX2X_RX_MODE_PROMISC;
11324 else if ((dev->flags & IFF_ALLMULTI) ||
11325 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11327 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11329 /* some multicasts */
11330 if (bnx2x_set_mc_list(bp) < 0)
11331 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11333 if (bnx2x_set_uc_list(bp) < 0)
11334 rx_mode = BNX2X_RX_MODE_PROMISC;
11337 bp->rx_mode = rx_mode;
11338 /* handle ISCSI SD mode */
11339 if (IS_MF_ISCSI_SD(bp))
11340 bp->rx_mode = BNX2X_RX_MODE_NONE;
11342 /* Schedule the rx_mode command */
11343 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11344 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11348 bnx2x_set_storm_rx_mode(bp);
11351 /* called with rtnl_lock */
11352 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11353 int devad, u16 addr)
11355 struct bnx2x *bp = netdev_priv(netdev);
11359 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11360 prtad, devad, addr);
11362 /* The HW expects different devad if CL22 is used */
11363 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11365 bnx2x_acquire_phy_lock(bp);
11366 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
11367 bnx2x_release_phy_lock(bp);
11368 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11375 /* called with rtnl_lock */
11376 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11377 u16 addr, u16 value)
11379 struct bnx2x *bp = netdev_priv(netdev);
11383 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
11384 prtad, devad, addr, value);
11386 /* The HW expects different devad if CL22 is used */
11387 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11389 bnx2x_acquire_phy_lock(bp);
11390 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
11391 bnx2x_release_phy_lock(bp);
11395 /* called with rtnl_lock */
11396 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11398 struct bnx2x *bp = netdev_priv(dev);
11399 struct mii_ioctl_data *mdio = if_mii(ifr);
11401 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11402 mdio->phy_id, mdio->reg_num, mdio->val_in);
11404 if (!netif_running(dev))
11407 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11410 #ifdef CONFIG_NET_POLL_CONTROLLER
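/* netpoll entry point: schedule NAPI on every ethernet queue so that
 * pending completions are serviced even when interrupts are not
 * available.
 */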
11411 static void poll_bnx2x(struct net_device *dev)
11413 struct bnx2x *bp = netdev_priv(dev);
11416 for_each_eth_queue(bp, i) {
11417 struct bnx2x_fastpath *fp = &bp->fp[i];
11418 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11423 static int bnx2x_validate_addr(struct net_device *dev)
11425 struct bnx2x *bp = netdev_priv(dev);
11427 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
11428 BNX2X_ERR("Non-valid Ethernet address\n");
11429 return -EADDRNOTAVAIL;
11434 static const struct net_device_ops bnx2x_netdev_ops = {
11435 .ndo_open = bnx2x_open,
11436 .ndo_stop = bnx2x_close,
11437 .ndo_start_xmit = bnx2x_start_xmit,
11438 .ndo_select_queue = bnx2x_select_queue,
11439 .ndo_set_rx_mode = bnx2x_set_rx_mode,
11440 .ndo_set_mac_address = bnx2x_change_mac_addr,
11441 .ndo_validate_addr = bnx2x_validate_addr,
11442 .ndo_do_ioctl = bnx2x_ioctl,
11443 .ndo_change_mtu = bnx2x_change_mtu,
11444 .ndo_fix_features = bnx2x_fix_features,
11445 .ndo_set_features = bnx2x_set_features,
11446 .ndo_tx_timeout = bnx2x_tx_timeout,
11447 #ifdef CONFIG_NET_POLL_CONTROLLER
11448 .ndo_poll_controller = poll_bnx2x,
11450 .ndo_setup_tc = bnx2x_setup_tc,
11452 #ifdef NETDEV_FCOE_WWNN
11453 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
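/* Prefer a 64-bit DMA mask (USING_DAC_FLAG lets us advertise
 * NETIF_F_HIGHDMA later); fall back to a 32-bit mask if the system
 * cannot do 64-bit DMA.
 */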
11457 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
11459 struct device *dev = &bp->pdev->dev;
11461 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
11462 bp->flags |= USING_DAC_FLAG;
11463 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
11464 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
11467 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
11468 dev_err(dev, "System does not support DMA, aborting\n");
11475 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11476 struct net_device *dev,
11477 unsigned long board_type)
11482 bool chip_is_e1x = (board_type == BCM57710 ||
11483 board_type == BCM57711 ||
11484 board_type == BCM57711E);
11486 SET_NETDEV_DEV(dev, &pdev->dev);
11487 bp = netdev_priv(dev);
11493 rc = pci_enable_device(pdev);
11495 dev_err(&bp->pdev->dev,
11496 "Cannot enable PCI device, aborting\n");
11500 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11501 dev_err(&bp->pdev->dev,
11502 "Cannot find PCI device base address, aborting\n");
11504 goto err_out_disable;
11507 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11508 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
11509 " base address, aborting\n");
11511 goto err_out_disable;
11514 if (atomic_read(&pdev->enable_cnt) == 1) {
11515 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11517 dev_err(&bp->pdev->dev,
11518 "Cannot obtain PCI resources, aborting\n");
11519 goto err_out_disable;
11522 pci_set_master(pdev);
11523 pci_save_state(pdev);
11526 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11527 if (bp->pm_cap == 0) {
11528 dev_err(&bp->pdev->dev,
11529 "Cannot find power management capability, aborting\n");
11531 goto err_out_release;
11534 if (!pci_is_pcie(pdev)) {
11535 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
11537 goto err_out_release;
11540 rc = bnx2x_set_coherency_mask(bp);
11542 goto err_out_release;
11544 dev->mem_start = pci_resource_start(pdev, 0);
11545 dev->base_addr = dev->mem_start;
11546 dev->mem_end = pci_resource_end(pdev, 0);
11548 dev->irq = pdev->irq;
11550 bp->regview = pci_ioremap_bar(pdev, 0);
11551 if (!bp->regview) {
11552 dev_err(&bp->pdev->dev,
11553 "Cannot map register space, aborting\n");
11555 goto err_out_release;
11558 /* In E1/E1H use the PCI device function given by the kernel.
11559 * In E2/E3 read the physical function from the ME register since these
11560 * chips support Physical Device Assignment, where the kernel BDF may be
11561 * arbitrary (depending on the hypervisor).
11564 bp->pf_num = PCI_FUNC(pdev->devfn);
11565 else { /* chip is E2/E3 */
11566 pci_read_config_dword(bp->pdev,
11567 PCICFG_ME_REGISTER, &pci_cfg_dword);
11568 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
11569 ME_REG_ABS_PF_NUM_SHIFT);
11571 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
11573 bnx2x_set_power_state(bp, PCI_D0);
11575 /* clean indirect addresses */
11576 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11577 PCICFG_VENDOR_ID_OFFSET);
11579 * Clean the following indirect addresses for all functions since they
11580 * are not used by the driver.
11582 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
11583 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
11584 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
11585 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
11588 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
11589 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
11590 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
11591 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
11595 * Enable internal target-read (in case we are probed after PF FLR).
11596 * Must be done prior to any BAR read access. Only for 57712 and up
11599 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
11601 dev->watchdog_timeo = TX_TIMEOUT;
11603 dev->netdev_ops = &bnx2x_netdev_ops;
11604 bnx2x_set_ethtool_ops(dev);
11606 dev->priv_flags |= IFF_UNICAST_FLT;
11608 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11609 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
11610 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
11611 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
11613 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11614 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
11616 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
11617 if (bp->flags & USING_DAC_FLAG)
11618 dev->features |= NETIF_F_HIGHDMA;
11620 /* Add Loopback capability to the device */
11621 dev->hw_features |= NETIF_F_LOOPBACK;
11624 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
11627 /* get_port_hwinfo() will set prtad and mmds properly */
11628 bp->mdio.prtad = MDIO_PRTAD_NONE;
11630 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11631 bp->mdio.dev = dev;
11632 bp->mdio.mdio_read = bnx2x_mdio_read;
11633 bp->mdio.mdio_write = bnx2x_mdio_write;
11638 if (atomic_read(&pdev->enable_cnt) == 1)
11639 pci_release_regions(pdev);
11642 pci_disable_device(pdev);
11643 pci_set_drvdata(pdev, NULL);
11649 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11650 int *width, int *speed)
11652 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11654 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
11656 /* return value of 1=2.5GHz 2=5GHz */
11657 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
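/* Sanity-check the firmware image before using it: every section must
 * lie within the file, all init_ops offsets must be in range, and the
 * version embedded in the file must match the one the driver was
 * built against.
 */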
11660 static int bnx2x_check_firmware(struct bnx2x *bp)
11662 const struct firmware *firmware = bp->firmware;
11663 struct bnx2x_fw_file_hdr *fw_hdr;
11664 struct bnx2x_fw_file_section *sections;
11665 u32 offset, len, num_ops;
11670 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
11671 BNX2X_ERR("Wrong FW size\n");
11675 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11676 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11678 /* Make sure none of the offsets and sizes make us read beyond
11679 * the end of the firmware data */
11680 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11681 offset = be32_to_cpu(sections[i].offset);
11682 len = be32_to_cpu(sections[i].len);
11683 if (offset + len > firmware->size) {
11684 BNX2X_ERR("Section %d length is out of bounds\n", i);
11689 /* Likewise for the init_ops offsets */
11690 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11691 ops_offsets = (u16 *)(firmware->data + offset);
11692 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11694 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11695 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11696 BNX2X_ERR("Section offset %d is out of bounds\n", i);
11701 /* Check FW version */
11702 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11703 fw_ver = firmware->data + offset;
11704 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11705 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11706 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11707 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11708 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11709 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
11710 BCM_5710_FW_MAJOR_VERSION,
11711 BCM_5710_FW_MINOR_VERSION,
11712 BCM_5710_FW_REVISION_VERSION,
11713 BCM_5710_FW_ENGINEERING_VERSION);
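/* The firmware file stores its arrays big-endian; the helpers below
 * convert them into host-order structures for the init code.
 */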
11720 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11722 const __be32 *source = (const __be32 *)_source;
11723 u32 *target = (u32 *)_target;
11726 for (i = 0; i < n/4; i++)
11727 target[i] = be32_to_cpu(source[i]);
11731 Ops array is stored in the following format:
11732 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11734 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11736 const __be32 *source = (const __be32 *)_source;
11737 struct raw_op *target = (struct raw_op *)_target;
11740 for (i = 0, j = 0; i < n/8; i++, j += 2) {
11741 tmp = be32_to_cpu(source[j]);
11742 target[i].op = (tmp >> 24) & 0xff;
11743 target[i].offset = tmp & 0xffffff;
11744 target[i].raw_data = be32_to_cpu(source[j + 1]);
11748 /* IRO array is stored in the following format:
11749 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
11751 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
11753 const __be32 *source = (const __be32 *)_source;
11754 struct iro *target = (struct iro *)_target;
11757 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
11758 target[i].base = be32_to_cpu(source[j]);
11760 tmp = be32_to_cpu(source[j]);
11761 target[i].m1 = (tmp >> 16) & 0xffff;
11762 target[i].m2 = tmp & 0xffff;
11764 tmp = be32_to_cpu(source[j]);
11765 target[i].m3 = (tmp >> 16) & 0xffff;
11766 target[i].size = tmp & 0xffff;
11771 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11773 const __be16 *source = (const __be16 *)_source;
11774 u16 *target = (u16 *)_target;
11777 for (i = 0; i < n/2; i++)
11778 target[i] = be16_to_cpu(source[i]);
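/* Allocate bp-><arr> and fill it from the firmware section of the same
 * name, converting with 'func'; on allocation failure control jumps to
 * 'lbl' so the already-allocated arrays can be unwound.
 */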
11781 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
11783 u32 len = be32_to_cpu(fw_hdr->arr.len); \
11784 bp->arr = kmalloc(len, GFP_KERNEL); \
11787 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
11788 (u8 *)bp->arr, len); \
11791 static int bnx2x_init_firmware(struct bnx2x *bp)
11793 const char *fw_file_name;
11794 struct bnx2x_fw_file_hdr *fw_hdr;
11800 if (CHIP_IS_E1(bp))
11801 fw_file_name = FW_FILE_NAME_E1;
11802 else if (CHIP_IS_E1H(bp))
11803 fw_file_name = FW_FILE_NAME_E1H;
11804 else if (!CHIP_IS_E1x(bp))
11805 fw_file_name = FW_FILE_NAME_E2;
11807 BNX2X_ERR("Unsupported chip revision\n");
11810 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
11812 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
11814 BNX2X_ERR("Can't load firmware file %s\n",
11816 goto request_firmware_exit;
11819 rc = bnx2x_check_firmware(bp);
11821 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
11822 goto request_firmware_exit;
11825 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
11827 /* Initialize the pointers to the init arrays */
11829 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
11832 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
11835 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
11838 /* STORMs firmware */
11839 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11840 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
11841 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
11842 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
11843 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11844 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
11845 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
11846 be32_to_cpu(fw_hdr->usem_pram_data.offset);
11847 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11848 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
11849 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
11850 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
11851 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
11852 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
11853 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
11854 be32_to_cpu(fw_hdr->csem_pram_data.offset);
11856 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
11861 kfree(bp->init_ops_offsets);
11862 init_offsets_alloc_err:
11863 kfree(bp->init_ops);
11864 init_ops_alloc_err:
11865 kfree(bp->init_data);
11866 request_firmware_exit:
11867 release_firmware(bp->firmware);
11868 bp->firmware = NULL;
11873 static void bnx2x_release_firmware(struct bnx2x *bp)
11875 kfree(bp->init_ops_offsets);
11876 kfree(bp->init_ops);
11877 kfree(bp->init_data);
11878 release_firmware(bp->firmware);
11879 bp->firmware = NULL;
11883 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
11884 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
11885 .init_hw_cmn = bnx2x_init_hw_common,
11886 .init_hw_port = bnx2x_init_hw_port,
11887 .init_hw_func = bnx2x_init_hw_func,
11889 .reset_hw_cmn = bnx2x_reset_common,
11890 .reset_hw_port = bnx2x_reset_port,
11891 .reset_hw_func = bnx2x_reset_func,
11893 .gunzip_init = bnx2x_gunzip_init,
11894 .gunzip_end = bnx2x_gunzip_end,
11896 .init_fw = bnx2x_init_firmware,
11897 .release_fw = bnx2x_release_firmware,
11900 void bnx2x__init_func_obj(struct bnx2x *bp)
11902 /* Prepare DMAE related driver resources */
11903 bnx2x_setup_dmae(bp);
11905 bnx2x_init_func_obj(bp, &bp->func_obj,
11906 bnx2x_sp(bp, func_rdata),
11907 bnx2x_sp_mapping(bp, func_rdata),
11908 bnx2x_sp(bp, func_afex_rdata),
11909 bnx2x_sp_mapping(bp, func_afex_rdata),
11910 &bnx2x_func_sp_drv);
11913 /* must be called after sriov-enable */
11914 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11916 int cid_count = BNX2X_L2_MAX_CID(bp);
11918 if (CNIC_SUPPORT(bp))
11919 cid_count += CNIC_CID_MAX;
11920 return roundup(cid_count, QM_CID_ROUND);
11924 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
11929 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
11935 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
11938 * If MSI-X is not supported - return number of SBs needed to support
11939 * one fast path queue: one FP queue + SB for CNIC
11942 return 1 + cnic_cnt;
11945 * The value in the PCI configuration space is the index of the last
11946 * entry, namely one less than the actual size of the table, which is
11947 * exactly what we want to return from this function: number of all SBs
11948 * without the default SB.
11950 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
11951 return control & PCI_MSIX_FLAGS_QSIZE;
11954 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11955 const struct pci_device_id *ent)
11957 struct net_device *dev = NULL;
11959 int pcie_width, pcie_speed;
11960 int rc, max_non_def_sbs;
11961 int rx_count, tx_count, rss_count, doorbell_size;
11964 * An estimated maximum supported CoS number according to the chip
11965 * version.
11966 * We will try to roughly estimate the maximum number of CoSes this chip
11967 * may support in order to minimize the memory allocated for Tx
11968 * netdev_queue's. This number will be accurately calculated during the
11969 * initialization of bp->max_cos based on the chip versions AND chip
11970 * revision in the bnx2x_init_bp().
11972 u8 max_cos_est = 0;
11974 switch (ent->driver_data) {
11978 max_cos_est = BNX2X_MULTI_TX_COS_E1X;
11983 max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
11991 case BCM57840_4_10:
11992 case BCM57840_2_20:
11997 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
12001 pr_err("Unknown board_type (%ld), aborting\n",
12007 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
12009 WARN_ON(!max_non_def_sbs);
12011 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
12012 rss_count = max_non_def_sbs - cnic_cnt;
12014 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
12015 rx_count = rss_count + cnic_cnt;
12018 * Maximum number of netdev Tx queues:
12019 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
12021 tx_count = rss_count * max_cos_est + cnic_cnt;
12023 /* dev zeroed in init_etherdev */
12024 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
12028 bp = netdev_priv(dev);
12030 bp->igu_sb_cnt = max_non_def_sbs;
12031 bp->msg_enable = debug;
12032 bp->cnic_support = cnic_cnt;
12034 pci_set_drvdata(pdev, dev);
12036 rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
12042 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
12043 BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
12045 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
12046 tx_count, rx_count);
12048 rc = bnx2x_init_bp(bp);
12050 goto init_one_exit;
12053 * Map doorbells here as we need the real value of bp->max_cos, which
12054 * is initialized in bnx2x_init_bp().
12056 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
12057 if (doorbell_size > pci_resource_len(pdev, 2)) {
12058 dev_err(&bp->pdev->dev,
12059 "Cannot map doorbells, bar size too small, aborting\n");
12061 goto init_one_exit;
12063 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12065 if (!bp->doorbells) {
12066 dev_err(&bp->pdev->dev,
12067 "Cannot map doorbell space, aborting\n");
12069 goto init_one_exit;
12072 /* calc qm_cid_count */
12073 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
12075 /* disable FCoE L2 queue for E1x */
12076 if (CHIP_IS_E1x(bp))
12077 bp->flags |= NO_FCOE_FLAG;
12079 /* disable FCOE for 57840 device, until FW supports it */
12080 switch (ent->driver_data) {
12082 case BCM57840_4_10:
12083 case BCM57840_2_20:
12086 bp->flags |= NO_FCOE_FLAG;
12089 /* Set bp->num_queues for MSI-X mode */
12090 bnx2x_set_num_queues(bp);
12092 /* Configure interrupt mode: try to enable MSI-X/MSI if
12093 * needed.
12095 bnx2x_set_int_mode(bp);
12097 rc = register_netdev(dev);
12099 dev_err(&pdev->dev, "Cannot register net device\n");
12100 goto init_one_exit;
12104 if (!NO_FCOE(bp)) {
12105 /* Add storage MAC address */
12107 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12111 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12114 "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12115 board_info[ent->driver_data].name,
12116 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12118 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
12119 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
12120 "5GHz (Gen2)" : "2.5GHz",
12121 dev->base_addr, bp->pdev->irq, dev->dev_addr);
12127 iounmap(bp->regview);
12130 iounmap(bp->doorbells);
12134 if (atomic_read(&pdev->enable_cnt) == 1)
12135 pci_release_regions(pdev);
12137 pci_disable_device(pdev);
12138 pci_set_drvdata(pdev, NULL);
12143 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12145 struct net_device *dev = pci_get_drvdata(pdev);
12149 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12152 bp = netdev_priv(dev);
12154 /* Delete storage MAC address */
12155 if (!NO_FCOE(bp)) {
12157 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12162 /* Delete app tlvs from dcbnl */
12163 bnx2x_dcbnl_update_applist(bp, true);
12166 unregister_netdev(dev);
12168 /* Power on: we can't let PCI layer write to us while we are in D3 */
12169 bnx2x_set_power_state(bp, PCI_D0);
12171 /* Disable MSI/MSI-X */
12172 bnx2x_disable_msi(bp);
12175 bnx2x_set_power_state(bp, PCI_D3hot);
12177 /* Make sure RESET task is not scheduled before continuing */
12178 cancel_delayed_work_sync(&bp->sp_rtnl_task);
12181 iounmap(bp->regview);
12184 iounmap(bp->doorbells);
12186 bnx2x_release_firmware(bp);
12188 bnx2x_free_mem_bp(bp);
12192 if (atomic_read(&pdev->enable_cnt) == 1)
12193 pci_release_regions(pdev);
12195 pci_disable_device(pdev);
12196 pci_set_drvdata(pdev, NULL);
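/* Minimal unload path used by the PCI error (EEH) handlers: stop Tx
 * and NAPI, free IRQs, SKBs and driver memory, and leave the device
 * in the CLOSED state.
 */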
12199 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12203 bp->state = BNX2X_STATE_ERROR;
12205 bp->rx_mode = BNX2X_RX_MODE_NONE;
12207 if (CNIC_LOADED(bp))
12208 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12211 bnx2x_tx_disable(bp);
12213 bnx2x_netif_stop(bp, 0);
12214 /* Delete all NAPI objects */
12215 bnx2x_del_all_napi(bp);
12216 if (CNIC_LOADED(bp))
12217 bnx2x_del_all_napi_cnic(bp);
12219 del_timer_sync(&bp->timer);
12221 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
12224 bnx2x_free_irq(bp);
12226 /* Free SKBs, SGEs, TPA pool and driver internals */
12227 bnx2x_free_skbs(bp);
12229 for_each_rx_queue(bp, i)
12230 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12232 bnx2x_free_mem(bp);
12234 bp->state = BNX2X_STATE_CLOSED;
12236 netif_carrier_off(bp->dev);
12241 static void bnx2x_eeh_recover(struct bnx2x *bp)
12245 mutex_init(&bp->port.phy_mutex);
12248 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
12249 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12250 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
12251 BNX2X_ERR("BAD MCP validity signature\n");
12255 * bnx2x_io_error_detected - called when PCI error is detected
12256 * @pdev: Pointer to PCI device
12257 * @state: The current pci connection state
12259 * This function is called after a PCI bus error affecting
12260 * this device has been detected.
12262 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12263 pci_channel_state_t state)
12265 struct net_device *dev = pci_get_drvdata(pdev);
12266 struct bnx2x *bp = netdev_priv(dev);
12270 netif_device_detach(dev);
12272 if (state == pci_channel_io_perm_failure) {
12274 return PCI_ERS_RESULT_DISCONNECT;
12277 if (netif_running(dev))
12278 bnx2x_eeh_nic_unload(bp);
12280 pci_disable_device(pdev);
12284 /* Request a slot reset */
12285 return PCI_ERS_RESULT_NEED_RESET;
12289 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12290 * @pdev: Pointer to PCI device
12292 * Restart the card from scratch, as if from a cold-boot.
12294 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12296 struct net_device *dev = pci_get_drvdata(pdev);
12297 struct bnx2x *bp = netdev_priv(dev);
12301 if (pci_enable_device(pdev)) {
12302 dev_err(&pdev->dev,
12303 "Cannot re-enable PCI device after reset\n");
12305 return PCI_ERS_RESULT_DISCONNECT;
12308 pci_set_master(pdev);
12309 pci_restore_state(pdev);
12311 if (netif_running(dev))
12312 bnx2x_set_power_state(bp, PCI_D0);
12316 return PCI_ERS_RESULT_RECOVERED;
12320 * bnx2x_io_resume - called when traffic can start flowing again
12321 * @pdev: Pointer to PCI device
12323 * This callback is called when the error recovery driver tells us that
12324 * it's OK to resume normal operation.
12326 static void bnx2x_io_resume(struct pci_dev *pdev)
12328 struct net_device *dev = pci_get_drvdata(pdev);
12329 struct bnx2x *bp = netdev_priv(dev);
12331 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12332 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
12338 bnx2x_eeh_recover(bp);
12340 if (netif_running(dev))
12341 bnx2x_nic_load(bp, LOAD_NORMAL);
12343 netif_device_attach(dev);
12348 static const struct pci_error_handlers bnx2x_err_handler = {
12349 .error_detected = bnx2x_io_error_detected,
12350 .slot_reset = bnx2x_io_slot_reset,
12351 .resume = bnx2x_io_resume,
12354 static struct pci_driver bnx2x_pci_driver = {
12355 .name = DRV_MODULE_NAME,
12356 .id_table = bnx2x_pci_tbl,
12357 .probe = bnx2x_init_one,
12358 .remove = __devexit_p(bnx2x_remove_one),
12359 .suspend = bnx2x_suspend,
12360 .resume = bnx2x_resume,
12361 .err_handler = &bnx2x_err_handler,
12364 static int __init bnx2x_init(void)
12368 pr_info("%s", version);
12370 bnx2x_wq = create_singlethread_workqueue("bnx2x");
12371 if (bnx2x_wq == NULL) {
12372 pr_err("Cannot create workqueue\n");
12376 ret = pci_register_driver(&bnx2x_pci_driver);
12378 pr_err("Cannot register driver\n");
12379 destroy_workqueue(bnx2x_wq);
12384 static void __exit bnx2x_cleanup(void)
12386 struct list_head *pos, *q;
12387 pci_unregister_driver(&bnx2x_pci_driver);
12389 destroy_workqueue(bnx2x_wq);
12391 /* Free globally allocated resources */
12392 list_for_each_safe(pos, q, &bnx2x_prev_list) {
12393 struct bnx2x_prev_path_list *tmp =
12394 list_entry(pos, struct bnx2x_prev_path_list, list);
12400 void bnx2x_notify_link_changed(struct bnx2x *bp)
12402 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
12405 module_init(bnx2x_init);
12406 module_exit(bnx2x_cleanup);
12409 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
12411 * @bp: driver handle
12414 * This function will wait until the ramrod completion returns.
12415 * Returns 0 on success, -ENODEV if the ramrod doesn't complete.
12417 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
12419 unsigned long ramrod_flags = 0;
12421 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12422 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
12423 &bp->iscsi_l2_mac_obj, true,
12424 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
12427 /* count denotes the number of new completions we have seen */
12428 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12430 struct eth_spe *spe;
12431 int cxt_index, cxt_offset;
12433 #ifdef BNX2X_STOP_ON_ERROR
12434 if (unlikely(bp->panic))
12438 spin_lock_bh(&bp->spq_lock);
12439 BUG_ON(bp->cnic_spq_pending < count);
12440 bp->cnic_spq_pending -= count;
12443 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
12444 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
12445 & SPE_HDR_CONN_TYPE) >>
12446 SPE_HDR_CONN_TYPE_SHIFT;
12447 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
12448 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
12450 /* Set validation for iSCSI L2 client before sending SETUP
12451 * ramrod
12453 if (type == ETH_CONNECTION_TYPE) {
12454 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
12455 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
12457 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
12458 (cxt_index * ILT_PAGE_CIDS);
12459 bnx2x_set_ctx_validation(bp,
12460 &bp->context[cxt_index].
12461 vcxt[cxt_offset].eth,
12462 BNX2X_ISCSI_ETH_CID(bp));
12467 * There may be no more than 8 L2 and no more than 8 L5 SPEs
12468 * in the air. We also check that the number of outstanding
12469 * COMMON ramrods is not more than the EQ and SPQ can
12470 * accommodate.
12472 if (type == ETH_CONNECTION_TYPE) {
12473 if (!atomic_read(&bp->cq_spq_left))
12476 atomic_dec(&bp->cq_spq_left);
12477 } else if (type == NONE_CONNECTION_TYPE) {
12478 if (!atomic_read(&bp->eq_spq_left))
12481 atomic_dec(&bp->eq_spq_left);
12482 } else if ((type == ISCSI_CONNECTION_TYPE) ||
12483 (type == FCOE_CONNECTION_TYPE)) {
12484 if (bp->cnic_spq_pending >=
12485 bp->cnic_eth_dev.max_kwqe_pending)
12488 bp->cnic_spq_pending++;
12490 BNX2X_ERR("Unknown SPE type: %d\n", type);
12495 spe = bnx2x_sp_get_next(bp);
12496 *spe = *bp->cnic_kwq_cons;
12498 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
12499 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12501 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12502 bp->cnic_kwq_cons = bp->cnic_kwq;
12504 bp->cnic_kwq_cons++;
12506 bnx2x_sp_prod_update(bp);
12507 spin_unlock_bh(&bp->spq_lock);
12510 static int bnx2x_cnic_sp_queue(struct net_device *dev,
12511 struct kwqe_16 *kwqes[], u32 count)
12513 struct bnx2x *bp = netdev_priv(dev);
12516 #ifdef BNX2X_STOP_ON_ERROR
12517 if (unlikely(bp->panic)) {
12518 BNX2X_ERR("Can't post to SP queue while panic\n");
12523 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
12524 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
12525 BNX2X_ERR("Handling parity error recovery. Try again later\n");
12529 spin_lock_bh(&bp->spq_lock);
12531 for (i = 0; i < count; i++) {
12532 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12534 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12537 *bp->cnic_kwq_prod = *spe;
12539 bp->cnic_kwq_pending++;
12541 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
12542 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12543 spe->data.update_data_addr.hi,
12544 spe->data.update_data_addr.lo,
12545 bp->cnic_kwq_pending);
12547 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12548 bp->cnic_kwq_prod = bp->cnic_kwq;
12550 bp->cnic_kwq_prod++;
12553 spin_unlock_bh(&bp->spq_lock);
12555 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12556 bnx2x_cnic_sp_post(bp, 0);
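/* Deliver a control event to the registered CNIC driver; cnic_mutex
 * serializes this against CNIC (un)registration.
 */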
12561 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12563 struct cnic_ops *c_ops;
12566 mutex_lock(&bp->cnic_mutex);
12567 c_ops = rcu_dereference_protected(bp->cnic_ops,
12568 lockdep_is_held(&bp->cnic_mutex));
12570 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12571 mutex_unlock(&bp->cnic_mutex);
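/* Same as bnx2x_cnic_ctl_send(), but usable from BH context: cnic_ops
 * is dereferenced under RCU instead of taking the mutex.
 */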
12576 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12578 struct cnic_ops *c_ops;
12582 c_ops = rcu_dereference(bp->cnic_ops);
12584 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12591 * for commands that have no data
12593 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12595 struct cnic_ctl_info ctl = {0};
12599 return bnx2x_cnic_ctl_send(bp, &ctl);
12602 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
12604 struct cnic_ctl_info ctl = {0};
12606 /* first we tell CNIC and only then we count this as a completion */
12607 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12608 ctl.data.comp.cid = cid;
12609 ctl.data.comp.error = err;
12611 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12612 bnx2x_cnic_sp_post(bp, 0);
12616 /* Called with netif_addr_lock_bh() taken.
12617 * Sets an rx_mode config for an iSCSI ETH client.
12619 * Completion should be checked outside.
12621 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
12623 unsigned long accept_flags = 0, ramrod_flags = 0;
12624 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12625 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
12628 /* Start accepting on the iSCSI L2 ring. Accept all multicasts
12629 * because it's the only way for the UIO Queue to accept
12630 * multicasts (in non-promiscuous mode only one Queue per
12631 * function will receive multicast packets (leading in our
12632 * case).
12634 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
12635 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
12636 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
12637 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
12639 /* Clear STOP_PENDING bit if START is requested */
12640 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
12642 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
12644 /* Clear START_PENDING bit if STOP is requested */
12645 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
12647 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
12648 set_bit(sched_state, &bp->sp_state);
12650 __set_bit(RAMROD_RX, &ramrod_flags);
12651 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
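/* Main CNIC-to-bnx2x control entry point (exported as cp->drv_ctl):
 * dispatches on ctl->cmd to handle context-table writes, SPQ credit
 * returns, iSCSI L2 start/stop and ULP (un)registration.
 */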
12657 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12659 struct bnx2x *bp = netdev_priv(dev);
12662 switch (ctl->cmd) {
12663 case DRV_CTL_CTXTBL_WR_CMD: {
12664 u32 index = ctl->data.io.offset;
12665 dma_addr_t addr = ctl->data.io.dma_addr;
12667 bnx2x_ilt_wr(bp, index, addr);
12671 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
12672 int count = ctl->data.credit.credit_count;
12674 bnx2x_cnic_sp_post(bp, count);
12678 /* rtnl_lock is held. */
12679 case DRV_CTL_START_L2_CMD: {
12680 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12681 unsigned long sp_bits = 0;
12683 /* Configure the iSCSI classification object */
12684 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
12685 cp->iscsi_l2_client_id,
12686 cp->iscsi_l2_cid, BP_FUNC(bp),
12687 bnx2x_sp(bp, mac_rdata),
12688 bnx2x_sp_mapping(bp, mac_rdata),
12689 BNX2X_FILTER_MAC_PENDING,
12690 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
12693 /* Set iSCSI MAC address */
12694 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
12701 /* Start accepting on iSCSI L2 ring */
12703 netif_addr_lock_bh(dev);
12704 bnx2x_set_iscsi_eth_rx_mode(bp, true);
12705 netif_addr_unlock_bh(dev);
12707 /* bits to wait on */
12708 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
12709 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
12711 if (!bnx2x_wait_sp_comp(bp, sp_bits))
12712 BNX2X_ERR("rx_mode completion timed out!\n");
12717 /* rtnl_lock is held. */
12718 case DRV_CTL_STOP_L2_CMD: {
12719 unsigned long sp_bits = 0;
12721 /* Stop accepting on iSCSI L2 ring */
12722 netif_addr_lock_bh(dev);
12723 bnx2x_set_iscsi_eth_rx_mode(bp, false);
12724 netif_addr_unlock_bh(dev);
12726 /* bits to wait on */
12727 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
12728 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
12730 if (!bnx2x_wait_sp_comp(bp, sp_bits))
12731 BNX2X_ERR("rx_mode completion timed out!\n");
12736 /* Unset iSCSI L2 MAC */
12737 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
12738 BNX2X_ISCSI_ETH_MAC, true);
12741 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
12742 int count = ctl->data.credit.credit_count;
12744 smp_mb__before_atomic_inc();
12745 atomic_add(count, &bp->cq_spq_left);
12746 smp_mb__after_atomic_inc();
12749 case DRV_CTL_ULP_REGISTER_CMD: {
12750 int ulp_type = ctl->data.register_data.ulp_type;
12752 if (CHIP_IS_E3(bp)) {
12753 int idx = BP_FW_MB_IDX(bp);
12754 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12755 int path = BP_PATH(bp);
12756 int port = BP_PORT(bp);
12758 u32 scratch_offset;
12761 /* first write capability to shmem2 */
12762 if (ulp_type == CNIC_ULP_ISCSI)
12763 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
12764 else if (ulp_type == CNIC_ULP_FCOE)
12765 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
12766 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12768 if ((ulp_type != CNIC_ULP_FCOE) ||
12769 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
12770 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
12773 /* if we reached here, we should write the FCoE capabilities */
12774 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
12775 if (!scratch_offset)
12777 scratch_offset += offsetof(struct glob_ncsi_oem_data,
12778 fcoe_features[path][port]);
12779 host_addr = (u32 *) &(ctl->data.register_data.
12781 for (i = 0; i < sizeof(struct fcoe_capabilities);
12783 REG_WR(bp, scratch_offset + i,
12784 *(host_addr + i/4));
12789 case DRV_CTL_ULP_UNREGISTER_CMD: {
12790 int ulp_type = ctl->data.ulp_type;
12792 if (CHIP_IS_E3(bp)) {
12793 int idx = BP_FW_MB_IDX(bp);
12796 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
12797 if (ulp_type == CNIC_ULP_ISCSI)
12798 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
12799 else if (ulp_type == CNIC_ULP_FCOE)
12800 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
12801 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
12807 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12814 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12816 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12818 if (bp->flags & USING_MSIX_FLAG) {
12819 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12820 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12821 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12823 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12824 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12826 if (!CHIP_IS_E1x(bp))
12827 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
12829 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
12831 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
12832 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
12833 cp->irq_arr[1].status_blk = bp->def_status_blk;
12834 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12835 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
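/* Refresh the CID- and ILT-related fields exported to CNIC; these
 * depend on bnx2x_cid_ilt_lines() and so may change between loads.
 */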
12840 void bnx2x_setup_cnic_info(struct bnx2x *bp)
12842 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12845 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12846 bnx2x_cid_ilt_lines(bp);
12847 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12848 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12849 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12851 if (NO_ISCSI_OOO(bp))
12852 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
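/* drv_register_cnic callback: load CNIC resources on first use,
 * allocate the kwqe ring page, record the caller's ops and data, and
 * publish cnic_ops with rcu_assign_pointer() so the slow path can
 * start delivering events.
 */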
12855 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12858 struct bnx2x *bp = netdev_priv(dev);
12859 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12862 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
12865 BNX2X_ERR("NULL ops received\n");
12869 if (!CNIC_SUPPORT(bp)) {
12870 BNX2X_ERR("Can't register CNIC when not supported\n");
12871 return -EOPNOTSUPP;
12874 if (!CNIC_LOADED(bp)) {
12875 rc = bnx2x_load_cnic(bp);
12877 BNX2X_ERR("CNIC-related load failed\n");
12883 bp->cnic_enabled = true;
12885 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12889 bp->cnic_kwq_cons = bp->cnic_kwq;
12890 bp->cnic_kwq_prod = bp->cnic_kwq;
12891 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12893 bp->cnic_spq_pending = 0;
12894 bp->cnic_kwq_pending = 0;
12896 bp->cnic_data = data;
12899 cp->drv_state |= CNIC_DRV_STATE_REGD;
12900 cp->iro_arr = bp->iro_arr;
12902 bnx2x_setup_cnic_irq_info(bp);
12904 rcu_assign_pointer(bp->cnic_ops, ops);
12909 static int bnx2x_unregister_cnic(struct net_device *dev)
12911 struct bnx2x *bp = netdev_priv(dev);
12912 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12914 mutex_lock(&bp->cnic_mutex);
12916 RCU_INIT_POINTER(bp->cnic_ops, NULL);
12917 mutex_unlock(&bp->cnic_mutex);
12919 kfree(bp->cnic_kwq);
12920 bp->cnic_kwq = NULL;
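/* Export the L2 driver's resources (BARs, CIDs, ILT layout and the
 * callbacks above) to the cnic module; returning NULL tells CNIC to
 * ignore this device.
 */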
12925 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12927 struct bnx2x *bp = netdev_priv(dev);
12928 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12930 /* If both iSCSI and FCoE are disabled - return NULL in
12931 * order to indicate to CNIC that it should not try to work
12932 * with this device.
12934 if (NO_ISCSI(bp) && NO_FCOE(bp))
12937 cp->drv_owner = THIS_MODULE;
12938 cp->chip_id = CHIP_ID(bp);
12939 cp->pdev = bp->pdev;
12940 cp->io_base = bp->regview;
12941 cp->io_base2 = bp->doorbells;
12942 cp->max_kwqe_pending = 8;
12943 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
12944 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
12945 bnx2x_cid_ilt_lines(bp);
12946 cp->ctx_tbl_len = CNIC_ILT_LINES;
12947 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
12948 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12949 cp->drv_ctl = bnx2x_drv_ctl;
12950 cp->drv_register_cnic = bnx2x_register_cnic;
12951 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12952 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
12953 cp->iscsi_l2_client_id =
12954 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
12955 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
12957 if (NO_ISCSI_OOO(bp))
12958 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
12961 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
12964 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
12967 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
12969 cp->ctx_tbl_offset,
12974 EXPORT_SYMBOL(bnx2x_cnic_probe);