1 /* bnx2x_main.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2013 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
20 #include <linux/module.h>
21 #include <linux/moduleparam.h>
22 #include <linux/kernel.h>
23 #include <linux/device.h>  /* for dev_info() */
24 #include <linux/timer.h>
25 #include <linux/errno.h>
26 #include <linux/ioport.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/pci.h>
30 #include <linux/init.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/bitops.h>
36 #include <linux/irq.h>
37 #include <linux/delay.h>
38 #include <asm/byteorder.h>
39 #include <linux/time.h>
40 #include <linux/ethtool.h>
41 #include <linux/mii.h>
42 #include <linux/if_vlan.h>
43 #include <net/ip.h>
44 #include <net/ipv6.h>
45 #include <net/tcp.h>
46 #include <net/checksum.h>
47 #include <net/ip6_checksum.h>
48 #include <linux/workqueue.h>
49 #include <linux/crc32.h>
50 #include <linux/crc32c.h>
51 #include <linux/prefetch.h>
52 #include <linux/zlib.h>
53 #include <linux/io.h>
54 #include <linux/semaphore.h>
55 #include <linux/stringify.h>
56 #include <linux/vmalloc.h>
57
58 #include "bnx2x.h"
59 #include "bnx2x_init.h"
60 #include "bnx2x_init_ops.h"
61 #include "bnx2x_cmn.h"
62 #include "bnx2x_vfpf.h"
63 #include "bnx2x_dcb.h"
64 #include "bnx2x_sp.h"
65
66 #include <linux/firmware.h>
67 #include "bnx2x_fw_file_hdr.h"
68 /* FW files */
69 #define FW_FILE_VERSION                                 \
70         __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
71         __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
72         __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
73         __stringify(BCM_5710_FW_ENGINEERING_VERSION)
74 #define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
75 #define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
76 #define FW_FILE_NAME_E2         "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
77
78 /* Time in jiffies before concluding the transmitter is hung */
79 #define TX_TIMEOUT              (5*HZ)
80
81 static char version[] =
82         "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
83         DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
84
85 MODULE_AUTHOR("Eliezer Tamir");
86 MODULE_DESCRIPTION("Broadcom NetXtreme II "
87                    "BCM57710/57711/57711E/"
88                    "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
89                    "57840/57840_MF Driver");
90 MODULE_LICENSE("GPL");
91 MODULE_VERSION(DRV_MODULE_VERSION);
92 MODULE_FIRMWARE(FW_FILE_NAME_E1);
93 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
94 MODULE_FIRMWARE(FW_FILE_NAME_E2);
95
96 int num_queues;
97 module_param(num_queues, int, 0);
98 MODULE_PARM_DESC(num_queues,
99                  " Set number of queues (default is the number of CPUs)");
100
101 static int disable_tpa;
102 module_param(disable_tpa, int, 0);
103 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
104
105 int int_mode;
106 module_param(int_mode, int, 0);
107 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
108                                 "(1 INT#x; 2 MSI)");
109
110 static int dropless_fc;
111 module_param(dropless_fc, int, 0);
112 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
113
114 static int mrrs = -1;
115 module_param(mrrs, int, 0);
116 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
117
118 static int debug;
119 module_param(debug, int, 0);
120 MODULE_PARM_DESC(debug, " Default debug msglevel");
121
122 struct workqueue_struct *bnx2x_wq;
123
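/* Register addresses and saved values for the MAC blocks (XMAC/EMAC/UMAC/BMAC) */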
124 struct bnx2x_mac_vals {
125         u32 xmac_addr;
126         u32 xmac_val;
127         u32 emac_addr;
128         u32 emac_val;
129         u32 umac_addr;
130         u32 umac_val;
131         u32 bmac_addr;
132         u32 bmac_val[2];
133 };
134
135 enum bnx2x_board_type {
136         BCM57710 = 0,
137         BCM57711,
138         BCM57711E,
139         BCM57712,
140         BCM57712_MF,
141         BCM57712_VF,
142         BCM57800,
143         BCM57800_MF,
144         BCM57800_VF,
145         BCM57810,
146         BCM57810_MF,
147         BCM57810_VF,
148         BCM57840_4_10,
149         BCM57840_2_20,
150         BCM57840_MF,
151         BCM57840_VF,
152         BCM57811,
153         BCM57811_MF,
154         BCM57840_O,
155         BCM57840_MFO,
156         BCM57811_VF
157 };
158
159 /* indexed by board_type, above */
160 static struct {
161         char *name;
162 } board_info[] = {
163         [BCM57710]      = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
164         [BCM57711]      = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
165         [BCM57711E]     = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
166         [BCM57712]      = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
167         [BCM57712_MF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
168         [BCM57712_VF]   = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
169         [BCM57800]      = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
170         [BCM57800_MF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
171         [BCM57800_VF]   = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
172         [BCM57810]      = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
173         [BCM57810_MF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
174         [BCM57810_VF]   = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
175         [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
176         [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
177         [BCM57840_MF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
178         [BCM57840_VF]   = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
179         [BCM57811]      = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
180         [BCM57811_MF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
181         [BCM57840_O]    = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
182         [BCM57840_MFO]  = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
183         [BCM57811_VF]   = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Virtual Function" }
184 };
185
186 #ifndef PCI_DEVICE_ID_NX2_57710
187 #define PCI_DEVICE_ID_NX2_57710         CHIP_NUM_57710
188 #endif
189 #ifndef PCI_DEVICE_ID_NX2_57711
190 #define PCI_DEVICE_ID_NX2_57711         CHIP_NUM_57711
191 #endif
192 #ifndef PCI_DEVICE_ID_NX2_57711E
193 #define PCI_DEVICE_ID_NX2_57711E        CHIP_NUM_57711E
194 #endif
195 #ifndef PCI_DEVICE_ID_NX2_57712
196 #define PCI_DEVICE_ID_NX2_57712         CHIP_NUM_57712
197 #endif
198 #ifndef PCI_DEVICE_ID_NX2_57712_MF
199 #define PCI_DEVICE_ID_NX2_57712_MF      CHIP_NUM_57712_MF
200 #endif
201 #ifndef PCI_DEVICE_ID_NX2_57712_VF
202 #define PCI_DEVICE_ID_NX2_57712_VF      CHIP_NUM_57712_VF
203 #endif
204 #ifndef PCI_DEVICE_ID_NX2_57800
205 #define PCI_DEVICE_ID_NX2_57800         CHIP_NUM_57800
206 #endif
207 #ifndef PCI_DEVICE_ID_NX2_57800_MF
208 #define PCI_DEVICE_ID_NX2_57800_MF      CHIP_NUM_57800_MF
209 #endif
210 #ifndef PCI_DEVICE_ID_NX2_57800_VF
211 #define PCI_DEVICE_ID_NX2_57800_VF      CHIP_NUM_57800_VF
212 #endif
213 #ifndef PCI_DEVICE_ID_NX2_57810
214 #define PCI_DEVICE_ID_NX2_57810         CHIP_NUM_57810
215 #endif
216 #ifndef PCI_DEVICE_ID_NX2_57810_MF
217 #define PCI_DEVICE_ID_NX2_57810_MF      CHIP_NUM_57810_MF
218 #endif
219 #ifndef PCI_DEVICE_ID_NX2_57840_O
220 #define PCI_DEVICE_ID_NX2_57840_O       CHIP_NUM_57840_OBSOLETE
221 #endif
222 #ifndef PCI_DEVICE_ID_NX2_57810_VF
223 #define PCI_DEVICE_ID_NX2_57810_VF      CHIP_NUM_57810_VF
224 #endif
225 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
226 #define PCI_DEVICE_ID_NX2_57840_4_10    CHIP_NUM_57840_4_10
227 #endif
228 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
229 #define PCI_DEVICE_ID_NX2_57840_2_20    CHIP_NUM_57840_2_20
230 #endif
231 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
232 #define PCI_DEVICE_ID_NX2_57840_MFO     CHIP_NUM_57840_MF_OBSOLETE
233 #endif
234 #ifndef PCI_DEVICE_ID_NX2_57840_MF
235 #define PCI_DEVICE_ID_NX2_57840_MF      CHIP_NUM_57840_MF
236 #endif
237 #ifndef PCI_DEVICE_ID_NX2_57840_VF
238 #define PCI_DEVICE_ID_NX2_57840_VF      CHIP_NUM_57840_VF
239 #endif
240 #ifndef PCI_DEVICE_ID_NX2_57811
241 #define PCI_DEVICE_ID_NX2_57811         CHIP_NUM_57811
242 #endif
243 #ifndef PCI_DEVICE_ID_NX2_57811_MF
244 #define PCI_DEVICE_ID_NX2_57811_MF      CHIP_NUM_57811_MF
245 #endif
246 #ifndef PCI_DEVICE_ID_NX2_57811_VF
247 #define PCI_DEVICE_ID_NX2_57811_VF      CHIP_NUM_57811_VF
248 #endif
249
250 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
251         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
252         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
253         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
254         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
255         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
256         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
257         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
258         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
259         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
260         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
261         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
262         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
263         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
264         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
265         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
266         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
267         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
268         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
269         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
270         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
271         { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
272         { 0 }
273 };
274
275 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
276
277 /* Global resources for unloading a previously loaded device */
278 #define BNX2X_PREV_WAIT_NEEDED 1
279 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
280 static LIST_HEAD(bnx2x_prev_list);
281 /****************************************************************************
282 * General service functions
283 ****************************************************************************/
284
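/* Write a 64-bit DMA address into two consecutive 32-bit registers (low dword first) */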
285 static void __storm_memset_dma_mapping(struct bnx2x *bp,
286                                        u32 addr, dma_addr_t mapping)
287 {
288         REG_WR(bp,  addr, U64_LO(mapping));
289         REG_WR(bp,  addr + 4, U64_HI(mapping));
290 }
291
292 static void storm_memset_spq_addr(struct bnx2x *bp,
293                                   dma_addr_t mapping, u16 abs_fid)
294 {
295         u32 addr = XSEM_REG_FAST_MEMORY +
296                         XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
297
298         __storm_memset_dma_mapping(bp, addr, mapping);
299 }
300
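/* Record the owning PF id for the given function in each storm's internal memory */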
301 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
302                                   u16 pf_id)
303 {
304         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
305                 pf_id);
306         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
307                 pf_id);
308         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
309                 pf_id);
310         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
311                 pf_id);
312 }
313
314 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
315                                  u8 enable)
316 {
317         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
318                 enable);
319         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
320                 enable);
321         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
322                 enable);
323         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
324                 enable);
325 }
326
327 static void storm_memset_eq_data(struct bnx2x *bp,
328                                  struct event_ring_data *eq_data,
329                                 u16 pfid)
330 {
331         size_t size = sizeof(struct event_ring_data);
332
333         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
334
335         __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
336 }
337
338 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
339                                  u16 pfid)
340 {
341         u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
342         REG_WR16(bp, addr, eq_prod);
343 }
344
345 /* used only at init
346  * locking is done by mcp
347  */
348 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
349 {
350         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
351         pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
352         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
353                                PCICFG_VENDOR_ID_OFFSET);
354 }
355
356 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
357 {
358         u32 val;
359
360         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
361         pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
362         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
363                                PCICFG_VENDOR_ID_OFFSET);
364
365         return val;
366 }
367
368 #define DMAE_DP_SRC_GRC         "grc src_addr [%08x]"
369 #define DMAE_DP_SRC_PCI         "pci src_addr [%x:%08x]"
370 #define DMAE_DP_DST_GRC         "grc dst_addr [%08x]"
371 #define DMAE_DP_DST_PCI         "pci dst_addr [%x:%08x]"
372 #define DMAE_DP_DST_NONE        "dst_addr [none]"
373
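/* Decode and print a DMAE command (addresses, length, completion parameters),
 * followed by its raw dwords.
 */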
374 static void bnx2x_dp_dmae(struct bnx2x *bp,
375                           struct dmae_command *dmae, int msglvl)
376 {
377         u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
378         int i;
379
380         switch (dmae->opcode & DMAE_COMMAND_DST) {
381         case DMAE_CMD_DST_PCI:
382                 if (src_type == DMAE_CMD_SRC_PCI)
383                         DP(msglvl, "DMAE: opcode 0x%08x\n"
384                            "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
385                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
386                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
387                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
388                            dmae->comp_addr_hi, dmae->comp_addr_lo,
389                            dmae->comp_val);
390                 else
391                         DP(msglvl, "DMAE: opcode 0x%08x\n"
392                            "src [%08x], len [%d*4], dst [%x:%08x]\n"
393                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
394                            dmae->opcode, dmae->src_addr_lo >> 2,
395                            dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
396                            dmae->comp_addr_hi, dmae->comp_addr_lo,
397                            dmae->comp_val);
398                 break;
399         case DMAE_CMD_DST_GRC:
400                 if (src_type == DMAE_CMD_SRC_PCI)
401                         DP(msglvl, "DMAE: opcode 0x%08x\n"
402                            "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
403                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
404                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
405                            dmae->len, dmae->dst_addr_lo >> 2,
406                            dmae->comp_addr_hi, dmae->comp_addr_lo,
407                            dmae->comp_val);
408                 else
409                         DP(msglvl, "DMAE: opcode 0x%08x\n"
410                            "src [%08x], len [%d*4], dst [%08x]\n"
411                            "comp_addr [%x:%08x], comp_val 0x%08x\n",
412                            dmae->opcode, dmae->src_addr_lo >> 2,
413                            dmae->len, dmae->dst_addr_lo >> 2,
414                            dmae->comp_addr_hi, dmae->comp_addr_lo,
415                            dmae->comp_val);
416                 break;
417         default:
418                 if (src_type == DMAE_CMD_SRC_PCI)
419                         DP(msglvl, "DMAE: opcode 0x%08x\n"
420                            "src_addr [%x:%08x]  len [%d * 4]  dst_addr [none]\n"
421                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
422                            dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
423                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
424                            dmae->comp_val);
425                 else
426                         DP(msglvl, "DMAE: opcode 0x%08x\n"
427                            "src_addr [%08x]  len [%d * 4]  dst_addr [none]\n"
428                            "comp_addr [%x:%08x]  comp_val 0x%08x\n",
429                            dmae->opcode, dmae->src_addr_lo >> 2,
430                            dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
431                            dmae->comp_val);
432                 break;
433         }
434
435         for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
436                 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
437                    i, *(((u32 *)dmae) + i));
438 }
439
440 /* copy command into DMAE command memory and set DMAE command go */
441 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
442 {
443         u32 cmd_offset;
444         int i;
445
446         cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
447         for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
448                 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
449         }
450         REG_WR(bp, dmae_reg_go_c[idx], 1);
451 }
452
453 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
454 {
455         return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
456                            DMAE_CMD_C_ENABLE);
457 }
458
459 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
460 {
461         return opcode & ~DMAE_CMD_SRC_RESET;
462 }
463
464 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
465                              bool with_comp, u8 comp_type)
466 {
467         u32 opcode = 0;
468
469         opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
470                    (dst_type << DMAE_COMMAND_DST_SHIFT));
471
472         opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
473
474         opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
475         opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
476                    (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
477         opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
478
479 #ifdef __BIG_ENDIAN
480         opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
481 #else
482         opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
483 #endif
484         if (with_comp)
485                 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
486         return opcode;
487 }
488
489 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
490                                       struct dmae_command *dmae,
491                                       u8 src_type, u8 dst_type)
492 {
493         memset(dmae, 0, sizeof(struct dmae_command));
494
495         /* set the opcode */
496         dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
497                                          true, DMAE_COMP_PCI);
498
499         /* fill in the completion parameters */
500         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
501         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
502         dmae->comp_val = DMAE_COMP_VAL;
503 }
504
505 /* issue a dmae command over the init-channel and wait for completion */
506 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
507 {
508         u32 *wb_comp = bnx2x_sp(bp, wb_comp);
509         int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
510         int rc = 0;
511
512         bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
513
514         /* Lock the DMAE channel. Disable BHs to prevent a deadlock,
515          * since this code is called both from syscall context and
516          * from the ndo_set_rx_mode() flow, which may run in BH context.
517          */
518         spin_lock_bh(&bp->dmae_lock);
519
520         /* reset completion */
521         *wb_comp = 0;
522
523         /* post the command on the channel used for initializations */
524         bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
525
526         /* wait for completion */
527         udelay(5);
528         while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
529
530                 if (!cnt ||
531                     (bp->recovery_state != BNX2X_RECOVERY_DONE &&
532                      bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
533                         BNX2X_ERR("DMAE timeout!\n");
534                         rc = DMAE_TIMEOUT;
535                         goto unlock;
536                 }
537                 cnt--;
538                 udelay(50);
539         }
540         if (*wb_comp & DMAE_PCI_ERR_FLAG) {
541                 BNX2X_ERR("DMAE PCI error!\n");
542                 rc = DMAE_PCI_ERROR;
543         }
544
545 unlock:
546         spin_unlock_bh(&bp->dmae_lock);
547         return rc;
548 }
549
550 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
551                       u32 len32)
552 {
553         int rc;
554         struct dmae_command dmae;
555
556         if (!bp->dmae_ready) {
557                 u32 *data = bnx2x_sp(bp, wb_data[0]);
558
559                 if (CHIP_IS_E1(bp))
560                         bnx2x_init_ind_wr(bp, dst_addr, data, len32);
561                 else
562                         bnx2x_init_str_wr(bp, dst_addr, data, len32);
563                 return;
564         }
565
566         /* set opcode and fixed command fields */
567         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
568
569         /* fill in addresses and len */
570         dmae.src_addr_lo = U64_LO(dma_addr);
571         dmae.src_addr_hi = U64_HI(dma_addr);
572         dmae.dst_addr_lo = dst_addr >> 2;
573         dmae.dst_addr_hi = 0;
574         dmae.len = len32;
575
576         /* issue the command and wait for completion */
577         rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
578         if (rc) {
579                 BNX2X_ERR("DMAE returned failure %d\n", rc);
580                 bnx2x_panic();
581         }
582 }
583
584 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
585 {
586         int rc;
587         struct dmae_command dmae;
588
589         if (!bp->dmae_ready) {
590                 u32 *data = bnx2x_sp(bp, wb_data[0]);
591                 int i;
592
593                 if (CHIP_IS_E1(bp))
594                         for (i = 0; i < len32; i++)
595                                 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
596                 else
597                         for (i = 0; i < len32; i++)
598                                 data[i] = REG_RD(bp, src_addr + i*4);
599
600                 return;
601         }
602
603         /* set opcode and fixed command fields */
604         bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
605
606         /* fill in addresses and len */
607         dmae.src_addr_lo = src_addr >> 2;
608         dmae.src_addr_hi = 0;
609         dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
610         dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
611         dmae.len = len32;
612
613         /* issue the command and wait for completion */
614         rc = bnx2x_issue_dmae_with_comp(bp, &dmae);
615         if (rc) {
616                 BNX2X_ERR("DMAE returned failure %d\n", rc);
617                 bnx2x_panic();
618         }
619 }
620
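/* Like bnx2x_write_dmae(), but splits the transfer into chunks of at most
 * DMAE_LEN32_WR_MAX dwords.
 */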
621 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
622                                       u32 addr, u32 len)
623 {
624         int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
625         int offset = 0;
626
627         while (len > dmae_wr_max) {
628                 bnx2x_write_dmae(bp, phys_addr + offset,
629                                  addr + offset, dmae_wr_max);
630                 offset += dmae_wr_max * 4;
631                 len -= dmae_wr_max;
632         }
633
634         bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
635 }
636
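/* Scan the X/T/C/U storm assert lists and print any firmware asserts;
 * returns the number of asserts found.
 */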
637 static int bnx2x_mc_assert(struct bnx2x *bp)
638 {
639         char last_idx;
640         int i, rc = 0;
641         u32 row0, row1, row2, row3;
642
643         /* XSTORM */
644         last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
645                            XSTORM_ASSERT_LIST_INDEX_OFFSET);
646         if (last_idx)
647                 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
648
649         /* print the asserts */
650         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
651
652                 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
653                               XSTORM_ASSERT_LIST_OFFSET(i));
654                 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
655                               XSTORM_ASSERT_LIST_OFFSET(i) + 4);
656                 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
657                               XSTORM_ASSERT_LIST_OFFSET(i) + 8);
658                 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
659                               XSTORM_ASSERT_LIST_OFFSET(i) + 12);
660
661                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
662                         BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
663                                   i, row3, row2, row1, row0);
664                         rc++;
665                 } else {
666                         break;
667                 }
668         }
669
670         /* TSTORM */
671         last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
672                            TSTORM_ASSERT_LIST_INDEX_OFFSET);
673         if (last_idx)
674                 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
675
676         /* print the asserts */
677         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
678
679                 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
680                               TSTORM_ASSERT_LIST_OFFSET(i));
681                 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
682                               TSTORM_ASSERT_LIST_OFFSET(i) + 4);
683                 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
684                               TSTORM_ASSERT_LIST_OFFSET(i) + 8);
685                 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
686                               TSTORM_ASSERT_LIST_OFFSET(i) + 12);
687
688                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
689                         BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
690                                   i, row3, row2, row1, row0);
691                         rc++;
692                 } else {
693                         break;
694                 }
695         }
696
697         /* CSTORM */
698         last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
699                            CSTORM_ASSERT_LIST_INDEX_OFFSET);
700         if (last_idx)
701                 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
702
703         /* print the asserts */
704         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
705
706                 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
707                               CSTORM_ASSERT_LIST_OFFSET(i));
708                 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
709                               CSTORM_ASSERT_LIST_OFFSET(i) + 4);
710                 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
711                               CSTORM_ASSERT_LIST_OFFSET(i) + 8);
712                 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
713                               CSTORM_ASSERT_LIST_OFFSET(i) + 12);
714
715                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
716                         BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
717                                   i, row3, row2, row1, row0);
718                         rc++;
719                 } else {
720                         break;
721                 }
722         }
723
724         /* USTORM */
725         last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
726                            USTORM_ASSERT_LIST_INDEX_OFFSET);
727         if (last_idx)
728                 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
729
730         /* print the asserts */
731         for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
732
733                 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
734                               USTORM_ASSERT_LIST_OFFSET(i));
735                 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
736                               USTORM_ASSERT_LIST_OFFSET(i) + 4);
737                 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
738                               USTORM_ASSERT_LIST_OFFSET(i) + 8);
739                 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
740                               USTORM_ASSERT_LIST_OFFSET(i) + 12);
741
742                 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
743                         BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
744                                   i, row3, row2, row1, row0);
745                         rc++;
746                 } else {
747                         break;
748                 }
749         }
750
751         return rc;
752 }
753
754 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
755 {
756         u32 addr, val;
757         u32 mark, offset;
758         __be32 data[9];
759         int word;
760         u32 trace_shmem_base;
761         if (BP_NOMCP(bp)) {
762                 BNX2X_ERR("NO MCP - can not dump\n");
763                 return;
764         }
765         netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
766                 (bp->common.bc_ver & 0xff0000) >> 16,
767                 (bp->common.bc_ver & 0xff00) >> 8,
768                 (bp->common.bc_ver & 0xff));
769
770         val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
771         if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
772                 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
773
774         if (BP_PATH(bp) == 0)
775                 trace_shmem_base = bp->common.shmem_base;
776         else
777                 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
778         addr = trace_shmem_base - 0x800;
779
780         /* validate TRCB signature */
781         mark = REG_RD(bp, addr);
782         if (mark != MFW_TRACE_SIGNATURE) {
783                 BNX2X_ERR("Trace buffer signature is missing.");
784                 return;
785         }
786
787         /* read cyclic buffer pointer */
788         addr += 4;
789         mark = REG_RD(bp, addr);
790         mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
791                         + ((mark + 0x3) & ~0x3) - 0x08000000;
792         printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
793
794         printk("%s", lvl);
795
796         /* dump buffer after the mark */
797         for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
798                 for (word = 0; word < 8; word++)
799                         data[word] = htonl(REG_RD(bp, offset + 4*word));
800                 data[8] = 0x0;
801                 pr_cont("%s", (char *)data);
802         }
803
804         /* dump buffer before the mark */
805         for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
806                 for (word = 0; word < 8; word++)
807                         data[word] = htonl(REG_RD(bp, offset + 4*word));
808                 data[8] = 0x0;
809                 pr_cont("%s", (char *)data);
810         }
811         printk("%s" "end of fw dump\n", lvl);
812 }
813
814 static void bnx2x_fw_dump(struct bnx2x *bp)
815 {
816         bnx2x_fw_dump_lvl(bp, KERN_ERR);
817 }
818
819 static void bnx2x_hc_int_disable(struct bnx2x *bp)
820 {
821         int port = BP_PORT(bp);
822         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
823         u32 val = REG_RD(bp, addr);
824
825         /* In E1 we must use only PCI configuration space to disable the
826          * MSI/MSI-X capability; it's forbidden to disable
827          * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
828          */
829         if (CHIP_IS_E1(bp)) {
830                 /* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
831                  * mask register to prevent the HC from sending interrupts
832                  * after we exit this function.
833                  */
834                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
835
836                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
837                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
838                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
839         } else
840                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
841                          HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
842                          HC_CONFIG_0_REG_INT_LINE_EN_0 |
843                          HC_CONFIG_0_REG_ATTN_BIT_EN_0);
844
845         DP(NETIF_MSG_IFDOWN,
846            "write %x to HC %d (addr 0x%x)\n",
847            val, port, addr);
848
849         /* flush all outstanding writes */
850         mmiowb();
851
852         REG_WR(bp, addr, val);
853         if (REG_RD(bp, addr) != val)
854                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
855 }
856
857 static void bnx2x_igu_int_disable(struct bnx2x *bp)
858 {
859         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
860
861         val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
862                  IGU_PF_CONF_INT_LINE_EN |
863                  IGU_PF_CONF_ATTN_BIT_EN);
864
865         DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
866
867         /* flush all outstanding writes */
868         mmiowb();
869
870         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
871         if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
872                 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
873 }
874
875 static void bnx2x_int_disable(struct bnx2x *bp)
876 {
877         if (bp->common.int_block == INT_BLOCK_HC)
878                 bnx2x_hc_int_disable(bp);
879         else
880                 bnx2x_igu_int_disable(bp);
881 }
882
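/* Dump driver and firmware state (indices, status block data, FW trace and
 * asserts; ring contents when BNX2X_STOP_ON_ERROR is set) for post-mortem analysis.
 */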
883 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
884 {
885         int i;
886         u16 j;
887         struct hc_sp_status_block_data sp_sb_data;
888         int func = BP_FUNC(bp);
889 #ifdef BNX2X_STOP_ON_ERROR
890         u16 start = 0, end = 0;
891         u8 cos;
892 #endif
893         if (disable_int)
894                 bnx2x_int_disable(bp);
895
896         bp->stats_state = STATS_STATE_DISABLED;
897         bp->eth_stats.unrecoverable_error++;
898         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
899
900         BNX2X_ERR("begin crash dump -----------------\n");
901
902         /* Indices */
903         /* Common */
904         BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
905                   bp->def_idx, bp->def_att_idx, bp->attn_state,
906                   bp->spq_prod_idx, bp->stats_counter);
907         BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
908                   bp->def_status_blk->atten_status_block.attn_bits,
909                   bp->def_status_blk->atten_status_block.attn_bits_ack,
910                   bp->def_status_blk->atten_status_block.status_block_id,
911                   bp->def_status_blk->atten_status_block.attn_bits_index);
912         BNX2X_ERR("     def (");
913         for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
914                 pr_cont("0x%x%s",
915                         bp->def_status_blk->sp_sb.index_values[i],
916                         (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
917
918         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
919                 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
920                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
921                         i*sizeof(u32));
922
923         pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) pf_id(0x%x)  vnic_id(0x%x)  vf_id(0x%x)  vf_valid (0x%x) state(0x%x)\n",
924                sp_sb_data.igu_sb_id,
925                sp_sb_data.igu_seg_id,
926                sp_sb_data.p_func.pf_id,
927                sp_sb_data.p_func.vnic_id,
928                sp_sb_data.p_func.vf_id,
929                sp_sb_data.p_func.vf_valid,
930                sp_sb_data.state);
931
932         for_each_eth_queue(bp, i) {
933                 struct bnx2x_fastpath *fp = &bp->fp[i];
934                 int loop;
935                 struct hc_status_block_data_e2 sb_data_e2;
936                 struct hc_status_block_data_e1x sb_data_e1x;
937                 struct hc_status_block_sm  *hc_sm_p =
938                         CHIP_IS_E1x(bp) ?
939                         sb_data_e1x.common.state_machine :
940                         sb_data_e2.common.state_machine;
941                 struct hc_index_data *hc_index_p =
942                         CHIP_IS_E1x(bp) ?
943                         sb_data_e1x.index_data :
944                         sb_data_e2.index_data;
945                 u8 data_size, cos;
946                 u32 *sb_data_p;
947                 struct bnx2x_fp_txdata txdata;
948
949                 /* Rx */
950                 BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)  rx_comp_prod(0x%x)  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
951                           i, fp->rx_bd_prod, fp->rx_bd_cons,
952                           fp->rx_comp_prod,
953                           fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
954                 BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)  fp_hc_idx(0x%x)\n",
955                           fp->rx_sge_prod, fp->last_max_sge,
956                           le16_to_cpu(fp->fp_hc_idx));
957
958                 /* Tx */
959                 for_each_cos_in_tx_queue(fp, cos)
960                 {
961                         txdata = *fp->txdata_ptr[cos];
962                         BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)  *tx_cons_sb(0x%x)\n",
963                                   i, txdata.tx_pkt_prod,
964                                   txdata.tx_pkt_cons, txdata.tx_bd_prod,
965                                   txdata.tx_bd_cons,
966                                   le16_to_cpu(*txdata.tx_cons_sb));
967                 }
968
969                 loop = CHIP_IS_E1x(bp) ?
970                         HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
971
972                 /* host sb data */
973
974                 if (IS_FCOE_FP(fp))
975                         continue;
976
977                 BNX2X_ERR("     run indexes (");
978                 for (j = 0; j < HC_SB_MAX_SM; j++)
979                         pr_cont("0x%x%s",
980                                fp->sb_running_index[j],
981                                (j == HC_SB_MAX_SM - 1) ? ")" : " ");
982
983                 BNX2X_ERR("     indexes (");
984                 for (j = 0; j < loop; j++)
985                         pr_cont("0x%x%s",
986                                fp->sb_index_values[j],
987                                (j == loop - 1) ? ")" : " ");
988                 /* fw sb data */
989                 data_size = CHIP_IS_E1x(bp) ?
990                         sizeof(struct hc_status_block_data_e1x) :
991                         sizeof(struct hc_status_block_data_e2);
992                 data_size /= sizeof(u32);
993                 sb_data_p = CHIP_IS_E1x(bp) ?
994                         (u32 *)&sb_data_e1x :
995                         (u32 *)&sb_data_e2;
996                 /* copy sb data in here */
997                 for (j = 0; j < data_size; j++)
998                         *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
999                                 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1000                                 j * sizeof(u32));
1001
1002                 if (!CHIP_IS_E1x(bp)) {
1003                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1004                                 sb_data_e2.common.p_func.pf_id,
1005                                 sb_data_e2.common.p_func.vf_id,
1006                                 sb_data_e2.common.p_func.vf_valid,
1007                                 sb_data_e2.common.p_func.vnic_id,
1008                                 sb_data_e2.common.same_igu_sb_1b,
1009                                 sb_data_e2.common.state);
1010                 } else {
1011                         pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) vnic_id(0x%x)  same_igu_sb_1b(0x%x) state(0x%x)\n",
1012                                 sb_data_e1x.common.p_func.pf_id,
1013                                 sb_data_e1x.common.p_func.vf_id,
1014                                 sb_data_e1x.common.p_func.vf_valid,
1015                                 sb_data_e1x.common.p_func.vnic_id,
1016                                 sb_data_e1x.common.same_igu_sb_1b,
1017                                 sb_data_e1x.common.state);
1018                 }
1019
1020                 /* SB_SMs data */
1021                 for (j = 0; j < HC_SB_MAX_SM; j++) {
1022                         pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x)  igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1023                                 j, hc_sm_p[j].__flags,
1024                                 hc_sm_p[j].igu_sb_id,
1025                                 hc_sm_p[j].igu_seg_id,
1026                                 hc_sm_p[j].time_to_expire,
1027                                 hc_sm_p[j].timer_value);
1028                 }
1029
1030                 /* Indices data */
1031                 for (j = 0; j < loop; j++) {
1032                         pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1033                                hc_index_p[j].flags,
1034                                hc_index_p[j].timeout);
1035                 }
1036         }
1037
1038 #ifdef BNX2X_STOP_ON_ERROR
1039
1040         /* event queue */
1041         BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1042         for (i = 0; i < NUM_EQ_DESC; i++) {
1043                 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1044
1045                 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1046                           i, bp->eq_ring[i].message.opcode,
1047                           bp->eq_ring[i].message.error);
1048                 BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
1049         }
1050
1051         /* Rings */
1052         /* Rx */
1053         for_each_valid_rx_queue(bp, i) {
1054                 struct bnx2x_fastpath *fp = &bp->fp[i];
1055
1056                 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1057                 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1058                 for (j = start; j != end; j = RX_BD(j + 1)) {
1059                         u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1060                         struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1061
1062                         BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
1063                                   i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1064                 }
1065
1066                 start = RX_SGE(fp->rx_sge_prod);
1067                 end = RX_SGE(fp->last_max_sge);
1068                 for (j = start; j != end; j = RX_SGE(j + 1)) {
1069                         u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1070                         struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1071
1072                         BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
1073                                   i, j, rx_sge[1], rx_sge[0], sw_page->page);
1074                 }
1075
1076                 start = RCQ_BD(fp->rx_comp_cons - 10);
1077                 end = RCQ_BD(fp->rx_comp_cons + 503);
1078                 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1079                         u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1080
1081                         BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1082                                   i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1083                 }
1084         }
1085
1086         /* Tx */
1087         for_each_valid_tx_queue(bp, i) {
1088                 struct bnx2x_fastpath *fp = &bp->fp[i];
1089                 for_each_cos_in_tx_queue(fp, cos) {
1090                         struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1091
1092                         start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1093                         end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1094                         for (j = start; j != end; j = TX_BD(j + 1)) {
1095                                 struct sw_tx_bd *sw_bd =
1096                                         &txdata->tx_buf_ring[j];
1097
1098                                 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1099                                           i, cos, j, sw_bd->skb,
1100                                           sw_bd->first_bd);
1101                         }
1102
1103                         start = TX_BD(txdata->tx_bd_cons - 10);
1104                         end = TX_BD(txdata->tx_bd_cons + 254);
1105                         for (j = start; j != end; j = TX_BD(j + 1)) {
1106                                 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1107
1108                                 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1109                                           i, cos, j, tx_bd[0], tx_bd[1],
1110                                           tx_bd[2], tx_bd[3]);
1111                         }
1112                 }
1113         }
1114 #endif
1115         bnx2x_fw_dump(bp);
1116         bnx2x_mc_assert(bp);
1117         BNX2X_ERR("end crash dump -----------------\n");
1118 }
1119
1120 /*
1121  * FLR Support for E2
1122  *
1123  * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1124  * initialization.
1125  */
1126 #define FLR_WAIT_USEC           10000   /* 10 milliseconds */
1127 #define FLR_WAIT_INTERVAL       50      /* usec */
1128 #define FLR_POLL_CNT            (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
1129
1130 struct pbf_pN_buf_regs {
1131         int pN;
1132         u32 init_crd;
1133         u32 crd;
1134         u32 crd_freed;
1135 };
1136
1137 struct pbf_pN_cmd_regs {
1138         int pN;
1139         u32 lines_occup;
1140         u32 lines_freed;
1141 };
1142
1143 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1144                                      struct pbf_pN_buf_regs *regs,
1145                                      u32 poll_count)
1146 {
1147         u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1148         u32 cur_cnt = poll_count;
1149
1150         crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1151         crd = crd_start = REG_RD(bp, regs->crd);
1152         init_crd = REG_RD(bp, regs->init_crd);
1153
1154         DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1155         DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
1156         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1157
1158         while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1159                (init_crd - crd_start))) {
1160                 if (cur_cnt--) {
1161                         udelay(FLR_WAIT_INTERVAL);
1162                         crd = REG_RD(bp, regs->crd);
1163                         crd_freed = REG_RD(bp, regs->crd_freed);
1164                 } else {
1165                         DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1166                            regs->pN);
1167                         DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
1168                            regs->pN, crd);
1169                         DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1170                            regs->pN, crd_freed);
1171                         break;
1172                 }
1173         }
1174         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1175            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1176 }
1177
1178 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1179                                      struct pbf_pN_cmd_regs *regs,
1180                                      u32 poll_count)
1181 {
1182         u32 occup, to_free, freed, freed_start;
1183         u32 cur_cnt = poll_count;
1184
1185         occup = to_free = REG_RD(bp, regs->lines_occup);
1186         freed = freed_start = REG_RD(bp, regs->lines_freed);
1187
1188         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
1189         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1190
1191         while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1192                 if (cur_cnt--) {
1193                         udelay(FLR_WAIT_INTERVAL);
1194                         occup = REG_RD(bp, regs->lines_occup);
1195                         freed = REG_RD(bp, regs->lines_freed);
1196                 } else {
1197                         DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1198                            regs->pN);
1199                         DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
1200                            regs->pN, occup);
1201                         DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1202                            regs->pN, freed);
1203                         break;
1204                 }
1205         }
1206         DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1207            poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1208 }
1209
1210 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1211                                     u32 expected, u32 poll_count)
1212 {
1213         u32 cur_cnt = poll_count;
1214         u32 val;
1215
1216         while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1217                 udelay(FLR_WAIT_INTERVAL);
1218
1219         return val;
1220 }
1221
1222 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1223                                     char *msg, u32 poll_cnt)
1224 {
1225         u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1226         if (val != 0) {
1227                 BNX2X_ERR("%s usage count=%d\n", msg, val);
1228                 return 1;
1229         }
1230         return 0;
1231 }
1232
1233 /* Common routines with VF FLR cleanup */
1234 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1235 {
1236         /* adjust polling timeout */
1237         if (CHIP_REV_IS_EMUL(bp))
1238                 return FLR_POLL_CNT * 2000;
1239
1240         if (CHIP_REV_IS_FPGA(bp))
1241                 return FLR_POLL_CNT * 120;
1242
1243         return FLR_POLL_CNT;
1244 }
1245
1246 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1247 {
1248         struct pbf_pN_cmd_regs cmd_regs[] = {
1249                 {0, (CHIP_IS_E3B0(bp)) ?
1250                         PBF_REG_TQ_OCCUPANCY_Q0 :
1251                         PBF_REG_P0_TQ_OCCUPANCY,
1252                     (CHIP_IS_E3B0(bp)) ?
1253                         PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1254                         PBF_REG_P0_TQ_LINES_FREED_CNT},
1255                 {1, (CHIP_IS_E3B0(bp)) ?
1256                         PBF_REG_TQ_OCCUPANCY_Q1 :
1257                         PBF_REG_P1_TQ_OCCUPANCY,
1258                     (CHIP_IS_E3B0(bp)) ?
1259                         PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1260                         PBF_REG_P1_TQ_LINES_FREED_CNT},
1261                 {4, (CHIP_IS_E3B0(bp)) ?
1262                         PBF_REG_TQ_OCCUPANCY_LB_Q :
1263                         PBF_REG_P4_TQ_OCCUPANCY,
1264                     (CHIP_IS_E3B0(bp)) ?
1265                         PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1266                         PBF_REG_P4_TQ_LINES_FREED_CNT}
1267         };
1268
1269         struct pbf_pN_buf_regs buf_regs[] = {
1270                 {0, (CHIP_IS_E3B0(bp)) ?
1271                         PBF_REG_INIT_CRD_Q0 :
1272                         PBF_REG_P0_INIT_CRD,
1273                     (CHIP_IS_E3B0(bp)) ?
1274                         PBF_REG_CREDIT_Q0 :
1275                         PBF_REG_P0_CREDIT,
1276                     (CHIP_IS_E3B0(bp)) ?
1277                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1278                         PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1279                 {1, (CHIP_IS_E3B0(bp)) ?
1280                         PBF_REG_INIT_CRD_Q1 :
1281                         PBF_REG_P1_INIT_CRD,
1282                     (CHIP_IS_E3B0(bp)) ?
1283                         PBF_REG_CREDIT_Q1 :
1284                         PBF_REG_P1_CREDIT,
1285                     (CHIP_IS_E3B0(bp)) ?
1286                         PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1287                         PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1288                 {4, (CHIP_IS_E3B0(bp)) ?
1289                         PBF_REG_INIT_CRD_LB_Q :
1290                         PBF_REG_P4_INIT_CRD,
1291                     (CHIP_IS_E3B0(bp)) ?
1292                         PBF_REG_CREDIT_LB_Q :
1293                         PBF_REG_P4_CREDIT,
1294                     (CHIP_IS_E3B0(bp)) ?
1295                         PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1296                         PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1297         };
1298
1299         int i;
1300
1301         /* Verify the command queues are flushed P0, P1, P4 */
1302         for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1303                 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1304
1305         /* Verify the transmission buffers are flushed P0, P1, P4 */
1306         for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1307                 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1308 }
1309
1310 #define OP_GEN_PARAM(param) \
1311         (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1312
1313 #define OP_GEN_TYPE(type) \
1314         (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1315
1316 #define OP_GEN_AGG_VECT(index) \
1317         (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1318
1319 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1320 {
1321         u32 op_gen_command = 0;
1322         u32 comp_addr = BAR_CSTRORM_INTMEM +
1323                         CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1324         int ret = 0;
1325
1326         if (REG_RD(bp, comp_addr)) {
1327                 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1328                 return 1;
1329         }
1330
1331         op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1332         op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1333         op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1334         op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1335
1336         DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1337         REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1338
1339         if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1340                 BNX2X_ERR("FW final cleanup did not succeed\n");
1341                 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1342                    (REG_RD(bp, comp_addr)));
1343                 bnx2x_panic();
1344                 return 1;
1345         }
1346         /* Zero completion for next FLR */
1347         REG_WR(bp, comp_addr, 0);
1348
1349         return ret;
1350 }
1351
1352 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1353 {
1354         u16 status;
1355
1356         pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1357         return status & PCI_EXP_DEVSTA_TRPND;
1358 }
1359
1360 /* PF FLR specific routines
1361 */
1362 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1363 {
1364         /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1365         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1366                         CFC_REG_NUM_LCIDS_INSIDE_PF,
1367                         "CFC PF usage counter timed out",
1368                         poll_cnt))
1369                 return 1;
1370
1371         /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1372         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1373                         DORQ_REG_PF_USAGE_CNT,
1374                         "DQ PF usage counter timed out",
1375                         poll_cnt))
1376                 return 1;
1377
1378         /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1379         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1380                         QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1381                         "QM PF usage counter timed out",
1382                         poll_cnt))
1383                 return 1;
1384
1385         /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1386         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1387                         TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1388                         "Timers VNIC usage counter timed out",
1389                         poll_cnt))
1390                 return 1;
1391         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1392                         TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1393                         "Timers NUM_SCANS usage counter timed out",
1394                         poll_cnt))
1395                 return 1;
1396
1397         /* Wait for DMAE PF usage counter to zero */
1398         if (bnx2x_flr_clnup_poll_hw_counter(bp,
1399                         dmae_reg_go_c[INIT_DMAE_C(bp)],
1400                         "DMAE command register timed out",
1401                         poll_cnt))
1402                 return 1;
1403
1404         return 0;
1405 }
1406
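/* Debug helper for the FLR flow: dumps the per-PF enable/disable related
 * registers (CFC weak enable, PBF disable, IGU MSI/MSI-X enables, and the
 * PGLUE BME/FLR and master-enable bits) to the SP debug log.
 */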
1407 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1408 {
1409         u32 val;
1410
1411         val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1412         DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1413
1414         val = REG_RD(bp, PBF_REG_DISABLE_PF);
1415         DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1416
1417         val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1418         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1419
1420         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1421         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1422
1423         val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1424         DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1425
1426         val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1427         DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1428
1429         val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1430         DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1431
1432         val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1433         DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1434            val);
1435 }
1436
1437 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1438 {
1439         u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1440
1441         DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1442
1443         /* Re-enable PF target read access */
1444         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1445
1446         /* Poll HW usage counters */
1447         DP(BNX2X_MSG_SP, "Polling usage counters\n");
1448         if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1449                 return -EBUSY;
1450
1451         /* Zero the igu 'trailing edge' and 'leading edge' */
1452
1453         /* Send the FW cleanup command */
1454         if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1455                 return -EBUSY;
1456
1457         /* ATC cleanup */
1458
1459         /* Verify TX hw is flushed */
1460         bnx2x_tx_hw_flushed(bp, poll_cnt);
1461
1462         /* Wait 100ms (not adjusted according to platform) */
1463         msleep(100);
1464
1465         /* Verify no pending pci transactions */
1466         if (bnx2x_is_pcie_pending(bp->pdev))
1467                 BNX2X_ERR("PCIE Transactions still pending\n");
1468
1469         /* Debug */
1470         bnx2x_hw_enable_status(bp);
1471
1472         /*
1473          * Master enable - Due to WB DMAE writes performed before this
1474          * register is re-initialized as part of the regular function init
1475          */
1476         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1477
1478         return 0;
1479 }
1480
1481 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1482 {
1483         int port = BP_PORT(bp);
1484         u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1485         u32 val = REG_RD(bp, addr);
1486         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1487         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1488         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1489
1490         if (msix) {
1491                 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1492                          HC_CONFIG_0_REG_INT_LINE_EN_0);
1493                 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1494                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1495                 if (single_msix)
1496                         val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1497         } else if (msi) {
1498                 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1499                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1500                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1501                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1502         } else {
1503                 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1504                         HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1505                         HC_CONFIG_0_REG_INT_LINE_EN_0 |
1506                         HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1507
1508                 if (!CHIP_IS_E1(bp)) {
1509                         DP(NETIF_MSG_IFUP,
1510                            "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1511
1512                         REG_WR(bp, addr, val);
1513
1514                         val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1515                 }
1516         }
1517
1518         if (CHIP_IS_E1(bp))
1519                 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1520
1521         DP(NETIF_MSG_IFUP,
1522            "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1523            (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1524
1525         REG_WR(bp, addr, val);
1526         /*
1527          * Ensure that HC_CONFIG is written before leading/trailing edge config
1528          */
1529         mmiowb();
1530         barrier();
1531
1532         if (!CHIP_IS_E1(bp)) {
1533                 /* init leading/trailing edge */
1534                 if (IS_MF(bp)) {
1535                         val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1536                         if (bp->port.pmf)
1537                                 /* enable nig and gpio3 attention */
1538                                 val |= 0x1100;
1539                 } else
1540                         val = 0xffff;
1541
1542                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1543                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1544         }
1545
1546         /* Make sure that interrupts are indeed enabled from here on */
1547         mmiowb();
1548 }
1549
1550 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1551 {
1552         u32 val;
1553         bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1554         bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1555         bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1556
1557         val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1558
1559         if (msix) {
1560                 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1561                          IGU_PF_CONF_SINGLE_ISR_EN);
1562                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1563                         IGU_PF_CONF_ATTN_BIT_EN);
1564
1565                 if (single_msix)
1566                         val |= IGU_PF_CONF_SINGLE_ISR_EN;
1567         } else if (msi) {
1568                 val &= ~IGU_PF_CONF_INT_LINE_EN;
1569                 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1570                         IGU_PF_CONF_ATTN_BIT_EN |
1571                         IGU_PF_CONF_SINGLE_ISR_EN);
1572         } else {
1573                 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1574                 val |= (IGU_PF_CONF_INT_LINE_EN |
1575                         IGU_PF_CONF_ATTN_BIT_EN |
1576                         IGU_PF_CONF_SINGLE_ISR_EN);
1577         }
1578
1579         /* Clean previous status - need to configure igu prior to ack */
1580         if ((!msix) || single_msix) {
1581                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1582                 bnx2x_ack_int(bp);
1583         }
1584
1585         val |= IGU_PF_CONF_FUNC_EN;
1586
1587         DP(NETIF_MSG_IFUP, "write 0x%x to IGU  mode %s\n",
1588            val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1589
1590         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1591
1592         if (val & IGU_PF_CONF_INT_LINE_EN)
1593                 pci_intx(bp->pdev, true);
1594
1595         barrier();
1596
1597         /* init leading/trailing edge */
1598         if (IS_MF(bp)) {
1599                 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1600                 if (bp->port.pmf)
1601                         /* enable nig and gpio3 attention */
1602                         val |= 0x1100;
1603         } else
1604                 val = 0xffff;
1605
1606         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1607         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1608
1609         /* Make sure that interrupts are indeed enabled from here on */
1610         mmiowb();
1611 }
1612
1613 void bnx2x_int_enable(struct bnx2x *bp)
1614 {
1615         if (bp->common.int_block == INT_BLOCK_HC)
1616                 bnx2x_hc_int_enable(bp);
1617         else
1618                 bnx2x_igu_int_enable(bp);
1619 }
1620
1621 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1622 {
1623         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1624         int i, offset;
1625
1626         if (disable_hw)
1627                 /* prevent the HW from sending interrupts */
1628                 bnx2x_int_disable(bp);
1629
1630         /* make sure all ISRs are done */
1631         if (msix) {
1632                 synchronize_irq(bp->msix_table[0].vector);
1633                 offset = 1;
1634                 if (CNIC_SUPPORT(bp))
1635                         offset++;
1636                 for_each_eth_queue(bp, i)
1637                         synchronize_irq(bp->msix_table[offset++].vector);
1638         } else
1639                 synchronize_irq(bp->pdev->irq);
1640
1641         /* make sure sp_task is not running */
1642         cancel_delayed_work(&bp->sp_task);
1643         cancel_delayed_work(&bp->period_task);
1644         flush_workqueue(bnx2x_wq);
1645 }
1646
1647 /* fast path */
1648
1649 /*
1650  * General service functions
1651  */
1652
1653 /* Return true if succeeded to acquire the lock */
1654 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1655 {
1656         u32 lock_status;
1657         u32 resource_bit = (1 << resource);
1658         int func = BP_FUNC(bp);
1659         u32 hw_lock_control_reg;
1660
1661         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1662            "Trying to take a lock on resource %d\n", resource);
1663
1664         /* Validating that the resource is within range */
1665         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1666                 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1667                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1668                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1669                 return false;
1670         }
1671
1672         if (func <= 5)
1673                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1674         else
1675                 hw_lock_control_reg =
1676                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1677
1678         /* Try to acquire the lock */
1679         REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1680         lock_status = REG_RD(bp, hw_lock_control_reg);
1681         if (lock_status & resource_bit)
1682                 return true;
1683
1684         DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1685            "Failed to get a lock on resource %d\n", resource);
1686         return false;
1687 }
1688
1689 /**
1690  * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1691  *
1692  * @bp: driver handle
1693  *
1694  * Returns the recovery leader resource id according to the engine this function
1695  * belongs to. Currently only 2 engines are supported.
1696  */
1697 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1698 {
1699         if (BP_PATH(bp))
1700                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1701         else
1702                 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1703 }
1704
1705 /**
1706  * bnx2x_trylock_leader_lock - try to acquire a leader lock.
1707  *
1708  * @bp: driver handle
1709  *
1710  * Tries to acquire a leader lock for current engine.
1711  */
1712 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1713 {
1714         return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1715 }
1716
1717 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1718
1719 /* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1720 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1721 {
1722         /* Set the interrupt occurred bit for the sp-task to recognize it
1723          * must ack the interrupt and transition according to the IGU
1724          * state machine.
1725          */
1726         atomic_set(&bp->interrupt_occurred, 1);
1727
1728         /* The sp_task must execute only after this bit
1729          * is set, otherwise we will get out of sync and miss all
1730          * further interrupts. Hence, the barrier.
1731          */
1732         smp_wmb();
1733
1734         /* schedule sp_task to workqueue */
1735         return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1736 }
1737
1738 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1739 {
1740         struct bnx2x *bp = fp->bp;
1741         int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1742         int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1743         enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1744         struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1745
1746         DP(BNX2X_MSG_SP,
1747            "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
1748            fp->index, cid, command, bp->state,
1749            rr_cqe->ramrod_cqe.ramrod_type);
1750
1751         /* If cid is within VF range, replace the slowpath object with the
1752          * one corresponding to this VF
1753          */
1754         if (cid >= BNX2X_FIRST_VF_CID  &&
1755             cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1756                 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1757
1758         switch (command) {
1759         case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1760                 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1761                 drv_cmd = BNX2X_Q_CMD_UPDATE;
1762                 break;
1763
1764         case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1765                 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1766                 drv_cmd = BNX2X_Q_CMD_SETUP;
1767                 break;
1768
1769         case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1770                 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1771                 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1772                 break;
1773
1774         case (RAMROD_CMD_ID_ETH_HALT):
1775                 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1776                 drv_cmd = BNX2X_Q_CMD_HALT;
1777                 break;
1778
1779         case (RAMROD_CMD_ID_ETH_TERMINATE):
1780                 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1781                 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1782                 break;
1783
1784         case (RAMROD_CMD_ID_ETH_EMPTY):
1785                 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1786                 drv_cmd = BNX2X_Q_CMD_EMPTY;
1787                 break;
1788
1789         default:
1790                 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1791                           command, fp->index);
1792                 return;
1793         }
1794
1795         if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1796             q_obj->complete_cmd(bp, q_obj, drv_cmd))
1797                 /* q_obj->complete_cmd() failure means that this was
1798                  * an unexpected completion.
1799                  *
1800                  * In this case we don't want to increase the bp->spq_left
1801                  * because apparently we haven't sent this command in the
1802                  * first place.
1803                  */
1804 #ifdef BNX2X_STOP_ON_ERROR
1805                 bnx2x_panic();
1806 #else
1807                 return;
1808 #endif
1809         /* SRIOV: reschedule any 'in_progress' operations */
1810         bnx2x_iov_sp_event(bp, cid, true);
1811
1812         smp_mb__before_atomic_inc();
1813         atomic_inc(&bp->cq_spq_left);
1814         /* push the change in bp->spq_left towards memory */
1815         smp_mb__after_atomic_inc();
1816
1817         DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1818
1819         if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1820             (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1821                 /* if the Q update ramrod completed for the last Q in the
1822                  * AFEX vif set flow, then ACK MCP at the end
1823                  *
1824                  * mark the pending ACK-to-MCP bit before clearing the
1825                  * pending bit, so that both bits are never clear at once.
1826                  * At the end of load/unload the driver checks that
1827                  * sp_state is cleared, and this ordering prevents
1828                  * races
1829                  */
1830                 smp_mb__before_clear_bit();
1831                 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1832                 wmb();
1833                 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1834                 smp_mb__after_clear_bit();
1835
1836                 /* schedule the sp task as mcp ack is required */
1837                 bnx2x_schedule_sp_task(bp);
1838         }
1839
1840         return;
1841 }
1842
1843 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1844 {
1845         struct bnx2x *bp = netdev_priv(dev_instance);
1846         u16 status = bnx2x_ack_int(bp);
1847         u16 mask;
1848         int i;
1849         u8 cos;
1850
1851         /* Return here if interrupt is shared and it's not for us */
1852         if (unlikely(status == 0)) {
1853                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1854                 return IRQ_NONE;
1855         }
1856         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1857
1858 #ifdef BNX2X_STOP_ON_ERROR
1859         if (unlikely(bp->panic))
1860                 return IRQ_HANDLED;
1861 #endif
1862
1863         for_each_eth_queue(bp, i) {
1864                 struct bnx2x_fastpath *fp = &bp->fp[i];
1865
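                /* status bit 0 belongs to the default (slowpath) SB; when CNIC
                 * is supported its SB takes the next bit, so fast-path queue i
                 * is signalled via bit (1 + i + CNIC_SUPPORT(bp)).
                 */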
1866                 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1867                 if (status & mask) {
1868                         /* Handle Rx or Tx according to SB id */
1869                         for_each_cos_in_tx_queue(fp, cos)
1870                                 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1871                         prefetch(&fp->sb_running_index[SM_RX_ID]);
1872                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1873                         status &= ~mask;
1874                 }
1875         }
1876
1877         if (CNIC_SUPPORT(bp)) {
1878                 mask = 0x2;
1879                 if (status & (mask | 0x1)) {
1880                         struct cnic_ops *c_ops = NULL;
1881
1882                         rcu_read_lock();
1883                         c_ops = rcu_dereference(bp->cnic_ops);
1884                         if (c_ops && (bp->cnic_eth_dev.drv_state &
1885                                       CNIC_DRV_STATE_HANDLES_IRQ))
1886                                 c_ops->cnic_handler(bp->cnic_data, NULL);
1887                         rcu_read_unlock();
1888
1889                         status &= ~mask;
1890                 }
1891         }
1892
1893         if (unlikely(status & 0x1)) {
1894
1895                 /* schedule sp task to perform default status block work, ack
1896                  * attentions and enable interrupts.
1897                  */
1898                 bnx2x_schedule_sp_task(bp);
1899
1900                 status &= ~0x1;
1901                 if (!status)
1902                         return IRQ_HANDLED;
1903         }
1904
1905         if (unlikely(status))
1906                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1907                    status);
1908
1909         return IRQ_HANDLED;
1910 }
1911
1912 /* Link */
1913
1914 /*
1915  * General service functions
1916  */
1917
1918 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1919 {
1920         u32 lock_status;
1921         u32 resource_bit = (1 << resource);
1922         int func = BP_FUNC(bp);
1923         u32 hw_lock_control_reg;
1924         int cnt;
1925
1926         /* Validating that the resource is within range */
1927         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1928                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1929                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1930                 return -EINVAL;
1931         }
1932
1933         if (func <= 5) {
1934                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1935         } else {
1936                 hw_lock_control_reg =
1937                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1938         }
1939
1940         /* Validating that the resource is not already taken */
1941         lock_status = REG_RD(bp, hw_lock_control_reg);
1942         if (lock_status & resource_bit) {
1943                 BNX2X_ERR("lock_status 0x%x  resource_bit 0x%x\n",
1944                    lock_status, resource_bit);
1945                 return -EEXIST;
1946         }
1947
1948         /* Try for 5 seconds every 5ms */
1949         for (cnt = 0; cnt < 1000; cnt++) {
1950                 /* Try to acquire the lock */
1951                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1952                 lock_status = REG_RD(bp, hw_lock_control_reg);
1953                 if (lock_status & resource_bit)
1954                         return 0;
1955
1956                 usleep_range(5000, 10000);
1957         }
1958         BNX2X_ERR("Timeout\n");
1959         return -EAGAIN;
1960 }
1961
1962 int bnx2x_release_leader_lock(struct bnx2x *bp)
1963 {
1964         return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1965 }
1966
1967 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1968 {
1969         u32 lock_status;
1970         u32 resource_bit = (1 << resource);
1971         int func = BP_FUNC(bp);
1972         u32 hw_lock_control_reg;
1973
1974         /* Validating that the resource is within range */
1975         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1976                 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1977                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1978                 return -EINVAL;
1979         }
1980
1981         if (func <= 5) {
1982                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1983         } else {
1984                 hw_lock_control_reg =
1985                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1986         }
1987
1988         /* Validating that the resource is currently taken */
1989         lock_status = REG_RD(bp, hw_lock_control_reg);
1990         if (!(lock_status & resource_bit)) {
1991                 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
1992                           lock_status, resource_bit);
1993                 return -EFAULT;
1994         }
1995
1996         REG_WR(bp, hw_lock_control_reg, resource_bit);
1997         return 0;
1998 }
1999
2000 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2001 {
2002         /* The GPIO should be swapped if swap register is set and active */
2003         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2004                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2005         int gpio_shift = gpio_num +
2006                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2007         u32 gpio_mask = (1 << gpio_shift);
2008         u32 gpio_reg;
2009         int value;
2010
2011         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2012                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2013                 return -EINVAL;
2014         }
2015
2016         /* read GPIO value */
2017         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2018
2019         /* get the requested pin value */
2020         if ((gpio_reg & gpio_mask) == gpio_mask)
2021                 value = 1;
2022         else
2023                 value = 0;
2024
2025         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2026
2027         return value;
2028 }
2029
2030 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2031 {
2032         /* The GPIO should be swapped if swap register is set and active */
2033         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2034                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2035         int gpio_shift = gpio_num +
2036                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2037         u32 gpio_mask = (1 << gpio_shift);
2038         u32 gpio_reg;
2039
2040         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2041                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2042                 return -EINVAL;
2043         }
2044
2045         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2046         /* read GPIO and mask except the float bits */
2047         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2048
2049         switch (mode) {
2050         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2051                 DP(NETIF_MSG_LINK,
2052                    "Set GPIO %d (shift %d) -> output low\n",
2053                    gpio_num, gpio_shift);
2054                 /* clear FLOAT and set CLR */
2055                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2056                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2057                 break;
2058
2059         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2060                 DP(NETIF_MSG_LINK,
2061                    "Set GPIO %d (shift %d) -> output high\n",
2062                    gpio_num, gpio_shift);
2063                 /* clear FLOAT and set SET */
2064                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2065                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2066                 break;
2067
2068         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2069                 DP(NETIF_MSG_LINK,
2070                    "Set GPIO %d (shift %d) -> input\n",
2071                    gpio_num, gpio_shift);
2072                 /* set FLOAT */
2073                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074                 break;
2075
2076         default:
2077                 break;
2078         }
2079
2080         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2081         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2082
2083         return 0;
2084 }
2085
2086 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2087 {
2088         u32 gpio_reg = 0;
2089         int rc = 0;
2090
2091         /* Any port swapping should be handled by caller. */
2092
2093         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2094         /* read GPIO and mask except the float bits */
2095         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2096         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2097         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2098         gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2099
2100         switch (mode) {
2101         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2102                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2103                 /* set CLR */
2104                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2105                 break;
2106
2107         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2108                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2109                 /* set SET */
2110                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2111                 break;
2112
2113         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2114                 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2115                 /* set FLOAT */
2116                 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2117                 break;
2118
2119         default:
2120                 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2121                 rc = -EINVAL;
2122                 break;
2123         }
2124
2125         if (rc == 0)
2126                 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2127
2128         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2129
2130         return rc;
2131 }
2132
2133 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2134 {
2135         /* The GPIO should be swapped if swap register is set and active */
2136         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2137                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2138         int gpio_shift = gpio_num +
2139                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2140         u32 gpio_mask = (1 << gpio_shift);
2141         u32 gpio_reg;
2142
2143         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2144                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2145                 return -EINVAL;
2146         }
2147
2148         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2149         /* read GPIO int */
2150         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2151
2152         switch (mode) {
2153         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2154                 DP(NETIF_MSG_LINK,
2155                    "Clear GPIO INT %d (shift %d) -> output low\n",
2156                    gpio_num, gpio_shift);
2157                 /* clear SET and set CLR */
2158                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2159                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2160                 break;
2161
2162         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2163                 DP(NETIF_MSG_LINK,
2164                    "Set GPIO INT %d (shift %d) -> output high\n",
2165                    gpio_num, gpio_shift);
2166                 /* clear CLR and set SET */
2167                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2168                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2169                 break;
2170
2171         default:
2172                 break;
2173         }
2174
2175         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2176         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2177
2178         return 0;
2179 }
2180
2181 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2182 {
2183         u32 spio_reg;
2184
2185         /* Only 2 SPIOs are configurable */
2186         if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2187                 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2188                 return -EINVAL;
2189         }
2190
2191         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2192         /* read SPIO and mask except the float bits */
2193         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2194
2195         switch (mode) {
2196         case MISC_SPIO_OUTPUT_LOW:
2197                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2198                 /* clear FLOAT and set CLR */
2199                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2200                 spio_reg |=  (spio << MISC_SPIO_CLR_POS);
2201                 break;
2202
2203         case MISC_SPIO_OUTPUT_HIGH:
2204                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2205                 /* clear FLOAT and set SET */
2206                 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2207                 spio_reg |=  (spio << MISC_SPIO_SET_POS);
2208                 break;
2209
2210         case MISC_SPIO_INPUT_HI_Z:
2211                 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2212                 /* set FLOAT */
2213                 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2214                 break;
2215
2216         default:
2217                 break;
2218         }
2219
2220         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2221         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2222
2223         return 0;
2224 }
2225
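/* Translate the IEEE flow-control advertisement kept in link_vars into the
 * ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause bits for the active link
 * configuration index.
 */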
2226 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2227 {
2228         u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2229         switch (bp->link_vars.ieee_fc &
2230                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2231         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2232                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2233                                                    ADVERTISED_Pause);
2234                 break;
2235
2236         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2237                 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2238                                                   ADVERTISED_Pause);
2239                 break;
2240
2241         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2242                 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2243                 break;
2244
2245         default:
2246                 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2247                                                    ADVERTISED_Pause);
2248                 break;
2249         }
2250 }
2251
2252 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2253 {
2254         /* Initialize link parameters structure variables
2255          * It is recommended to turn off RX FC for jumbo frames
2256          *  for better performance
2257          */
2258         if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2259                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2260         else
2261                 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2262 }
2263
2264 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2265 {
2266         int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2267         u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2268
2269         if (!BP_NOMCP(bp)) {
2270                 bnx2x_set_requested_fc(bp);
2271                 bnx2x_acquire_phy_lock(bp);
2272
2273                 if (load_mode == LOAD_DIAG) {
2274                         struct link_params *lp = &bp->link_params;
2275                         lp->loopback_mode = LOOPBACK_XGXS;
2276                         /* do PHY loopback at 10G speed, if possible */
2277                         if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2278                                 if (lp->speed_cap_mask[cfx_idx] &
2279                                     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2280                                         lp->req_line_speed[cfx_idx] =
2281                                         SPEED_10000;
2282                                 else
2283                                         lp->req_line_speed[cfx_idx] =
2284                                         SPEED_1000;
2285                         }
2286                 }
2287
2288                 if (load_mode == LOAD_LOOPBACK_EXT) {
2289                         struct link_params *lp = &bp->link_params;
2290                         lp->loopback_mode = LOOPBACK_EXT;
2291                 }
2292
2293                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2294
2295                 bnx2x_release_phy_lock(bp);
2296
2297                 bnx2x_calc_fc_adv(bp);
2298
2299                 if (bp->link_vars.link_up) {
2300                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2301                         bnx2x_link_report(bp);
2302                 }
2303                 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2304                 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2305                 return rc;
2306         }
2307         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2308         return -EINVAL;
2309 }
2310
2311 void bnx2x_link_set(struct bnx2x *bp)
2312 {
2313         if (!BP_NOMCP(bp)) {
2314                 bnx2x_acquire_phy_lock(bp);
2315                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2316                 bnx2x_release_phy_lock(bp);
2317
2318                 bnx2x_calc_fc_adv(bp);
2319         } else
2320                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2321 }
2322
2323 static void bnx2x__link_reset(struct bnx2x *bp)
2324 {
2325         if (!BP_NOMCP(bp)) {
2326                 bnx2x_acquire_phy_lock(bp);
2327                 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2328                 bnx2x_release_phy_lock(bp);
2329         } else
2330                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2331 }
2332
2333 void bnx2x_force_link_reset(struct bnx2x *bp)
2334 {
2335         bnx2x_acquire_phy_lock(bp);
2336         bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2337         bnx2x_release_phy_lock(bp);
2338 }
2339
2340 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2341 {
2342         u8 rc = 0;
2343
2344         if (!BP_NOMCP(bp)) {
2345                 bnx2x_acquire_phy_lock(bp);
2346                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2347                                      is_serdes);
2348                 bnx2x_release_phy_lock(bp);
2349         } else
2350                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2351
2352         return rc;
2353 }
2354
2355 /* Calculates the per-VN min rates and stores them in input->vnic_min_rate[].
2356    They are needed for further normalizing of the min_rates.
2357    Fairness (CMNG_FLAGS_PER_PORT_FAIRNESS_VN) is disabled when:
2358      ETS is enabled,
2359        or
2360      all the min_rates are 0;
2361    in the latter case the fairness algorithm should be deactivated.
2362    If not all min_rates are zero then those that are zeroes will be set to 1.
2363  */
2364 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2365                                       struct cmng_init_input *input)
2366 {
2367         int all_zero = 1;
2368         int vn;
2369
2370         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2371                 u32 vn_cfg = bp->mf_config[vn];
2372                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2373                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2374
2375                 /* Skip hidden vns */
2376                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2377                         vn_min_rate = 0;
2378                 /* If min rate is zero - set it to 1 */
2379                 else if (!vn_min_rate)
2380                         vn_min_rate = DEF_MIN_RATE;
2381                 else
2382                         all_zero = 0;
2383
2384                 input->vnic_min_rate[vn] = vn_min_rate;
2385         }
2386
2387         /* if ETS or all min rates are zeros - disable fairness */
2388         if (BNX2X_IS_ETS_ENABLED(bp)) {
2389                 input->flags.cmng_enables &=
2390                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2391                 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2392         } else if (all_zero) {
2393                 input->flags.cmng_enables &=
2394                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2395                 DP(NETIF_MSG_IFUP,
2396                    "All MIN values are zeroes; fairness will be disabled\n");
2397         } else
2398                 input->flags.cmng_enables |=
2399                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2400 }
2401
2402 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2403                                     struct cmng_init_input *input)
2404 {
2405         u16 vn_max_rate;
2406         u32 vn_cfg = bp->mf_config[vn];
2407
2408         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2409                 vn_max_rate = 0;
2410         else {
2411                 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2412
2413                 if (IS_MF_SI(bp)) {
2414                         /* maxCfg is in percent of link speed */
2415                         vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2416                 } else /* SD modes */
2417                         /* maxCfg is absolute in 100Mb units */
2418                         vn_max_rate = maxCfg * 100;
2419         }
2420
2421         DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2422
2423         input->vnic_max_rate[vn] = vn_max_rate;
2424 }
2425
2426 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2427 {
2428         if (CHIP_REV_IS_SLOW(bp))
2429                 return CMNG_FNS_NONE;
2430         if (IS_MF(bp))
2431                 return CMNG_FNS_MINMAX;
2432
2433         return CMNG_FNS_NONE;
2434 }
2435
2436 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2437 {
2438         int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2439
2440         if (BP_NOMCP(bp))
2441                 return; /* what should be the default value in this case */
2442
2443         /* For 2 port configuration the absolute function number formula
2444          * is:
2445          *      abs_func = 2 * vn + BP_PORT + BP_PATH
2446          *
2447          *      and there are 4 functions per port
2448          *
2449          * For 4 port configuration it is
2450          *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2451          *
2452          *      and there are 2 functions per port
2453          */
2454         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2455                 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2456
2457                 if (func >= E1H_FUNC_MAX)
2458                         break;
2459
2460                 bp->mf_config[vn] =
2461                         MF_CFG_RD(bp, func_mf_config[func].config);
2462         }
2463         if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2464                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2465                 bp->flags |= MF_FUNC_DIS;
2466         } else {
2467                 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2468                 bp->flags &= ~MF_FUNC_DIS;
2469         }
2470 }
2471
2472 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2473 {
2474         struct cmng_init_input input;
2475         memset(&input, 0, sizeof(struct cmng_init_input));
2476
2477         input.port_rate = bp->link_vars.line_speed;
2478
2479         if (cmng_type == CMNG_FNS_MINMAX) {
2480                 int vn;
2481
2482                 /* read mf conf from shmem */
2483                 if (read_cfg)
2484                         bnx2x_read_mf_cfg(bp);
2485
2486                 /* vn_weight_sum and enable fairness if not 0 */
2487                 bnx2x_calc_vn_min(bp, &input);
2488
2489                 /* calculate and set min-max rate for each vn */
2490                 if (bp->port.pmf)
2491                         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2492                                 bnx2x_calc_vn_max(bp, vn, &input);
2493
2494                 /* always enable rate shaping and fairness */
2495                 input.flags.cmng_enables |=
2496                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2497
2498                 bnx2x_init_cmng(&input, &bp->cmng);
2499                 return;
2500         }
2501
2502         /* rate shaping and fairness are disabled */
2503         DP(NETIF_MSG_IFUP,
2504            "rate shaping and fairness are disabled\n");
2505 }
2506
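/* Write the cmng (rate shaping + fairness) configuration into XSTORM
 * internal memory: first the per-port struct, then the per-VN rate-shaping
 * and fairness variables for every function on this port.
 */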
2507 static void storm_memset_cmng(struct bnx2x *bp,
2508                               struct cmng_init *cmng,
2509                               u8 port)
2510 {
2511         int vn;
2512         size_t size = sizeof(struct cmng_struct_per_port);
2513
2514         u32 addr = BAR_XSTRORM_INTMEM +
2515                         XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2516
2517         __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2518
2519         for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2520                 int func = func_by_vn(bp, vn);
2521
2522                 addr = BAR_XSTRORM_INTMEM +
2523                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2524                 size = sizeof(struct rate_shaping_vars_per_vn);
2525                 __storm_memset_struct(bp, addr, size,
2526                                       (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2527
2528                 addr = BAR_XSTRORM_INTMEM +
2529                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2530                 size = sizeof(struct fairness_vars_per_vn);
2531                 __storm_memset_struct(bp, addr, size,
2532                                       (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2533         }
2534 }
2535
2536 /* This function is called upon link interrupt */
2537 static void bnx2x_link_attn(struct bnx2x *bp)
2538 {
2539         /* Make sure that we are synced with the current statistics */
2540         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2541
2542         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2543
2544         if (bp->link_vars.link_up) {
2545
2546                 /* dropless flow control */
2547                 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
2548                         int port = BP_PORT(bp);
2549                         u32 pause_enabled = 0;
2550
2551                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2552                                 pause_enabled = 1;
2553
2554                         REG_WR(bp, BAR_USTRORM_INTMEM +
2555                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2556                                pause_enabled);
2557                 }
2558
2559                 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2560                         struct host_port_stats *pstats;
2561
2562                         pstats = bnx2x_sp(bp, port_stats);
2563                         /* reset old mac stats */
2564                         memset(&(pstats->mac_stx[0]), 0,
2565                                sizeof(struct mac_stx));
2566                 }
2567                 if (bp->state == BNX2X_STATE_OPEN)
2568                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2569         }
2570
2571         if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2572                 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2573
2574                 if (cmng_fns != CMNG_FNS_NONE) {
2575                         bnx2x_cmng_fns_init(bp, false, cmng_fns);
2576                         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2577                 } else
2578                         /* rate shaping and fairness are disabled */
2579                         DP(NETIF_MSG_IFUP,
2580                            "single function mode without fairness\n");
2581         }
2582
2583         __bnx2x_link_report(bp);
2584
2585         if (IS_MF(bp))
2586                 bnx2x_link_sync_notify(bp);
2587 }
2588
2589 void bnx2x__link_status_update(struct bnx2x *bp)
2590 {
2591         if (bp->state != BNX2X_STATE_OPEN)
2592                 return;
2593
2594         /* read updated dcb configuration */
2595         if (IS_PF(bp)) {
2596                 bnx2x_dcbx_pmf_update(bp);
2597                 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2598                 if (bp->link_vars.link_up)
2599                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2600                 else
2601                         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2602                 /* indicate link status */
2603                 bnx2x_link_report(bp);
2604
2605         } else { /* VF */
2606                 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2607                                           SUPPORTED_10baseT_Full |
2608                                           SUPPORTED_100baseT_Half |
2609                                           SUPPORTED_100baseT_Full |
2610                                           SUPPORTED_1000baseT_Full |
2611                                           SUPPORTED_2500baseX_Full |
2612                                           SUPPORTED_10000baseT_Full |
2613                                           SUPPORTED_TP |
2614                                           SUPPORTED_FIBRE |
2615                                           SUPPORTED_Autoneg |
2616                                           SUPPORTED_Pause |
2617                                           SUPPORTED_Asym_Pause);
2618                 bp->port.advertising[0] = bp->port.supported[0];
2619
2620                 bp->link_params.bp = bp;
2621                 bp->link_params.port = BP_PORT(bp);
2622                 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2623                 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2624                 bp->link_params.req_line_speed[0] = SPEED_10000;
2625                 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2626                 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2627                 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2628                 bp->link_vars.line_speed = SPEED_10000;
2629                 bp->link_vars.link_status =
2630                         (LINK_STATUS_LINK_UP |
2631                          LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2632                 bp->link_vars.link_up = 1;
2633                 bp->link_vars.duplex = DUPLEX_FULL;
2634                 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2635                 __bnx2x_link_report(bp);
2636                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2637         }
2638 }
2639
2640 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2641                                   u16 vlan_val, u8 allowed_prio)
2642 {
2643         struct bnx2x_func_state_params func_params = {NULL};
2644         struct bnx2x_func_afex_update_params *f_update_params =
2645                 &func_params.params.afex_update;
2646
2647         func_params.f_obj = &bp->func_obj;
2648         func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2649
2650         /* no need to wait for RAMROD completion, so don't
2651          * set RAMROD_COMP_WAIT flag
2652          */
2653
2654         f_update_params->vif_id = vifid;
2655         f_update_params->afex_default_vlan = vlan_val;
2656         f_update_params->allowed_priorities = allowed_prio;
2657
2658         /* if the ramrod cannot be sent, respond to MCP immediately */
2659         if (bnx2x_func_state_change(bp, &func_params) < 0)
2660                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2661
2662         return 0;
2663 }
2664
2665 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2666                                           u16 vif_index, u8 func_bit_map)
2667 {
2668         struct bnx2x_func_state_params func_params = {NULL};
2669         struct bnx2x_func_afex_viflists_params *update_params =
2670                 &func_params.params.afex_viflists;
2671         int rc;
2672         u32 drv_msg_code;
2673
2674         /* validate only LIST_SET and LIST_GET are received from switch */
2675         if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2676                 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2677                           cmd_type);
2678
2679         func_params.f_obj = &bp->func_obj;
2680         func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2681
2682         /* set parameters according to cmd_type */
2683         update_params->afex_vif_list_command = cmd_type;
2684         update_params->vif_list_index = vif_index;
2685         update_params->func_bit_map =
2686                 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2687         update_params->func_to_clear = 0;
2688         drv_msg_code =
2689                 (cmd_type == VIF_LIST_RULE_GET) ?
2690                 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2691                 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2692
2693         /* if the ramrod cannot be sent, respond to MCP immediately for
2694          * SET and GET requests (others are not triggered from MCP)
2695          */
2696         rc = bnx2x_func_state_change(bp, &func_params);
2697         if (rc < 0)
2698                 bnx2x_fw_command(bp, drv_msg_code, 0);
2699
2700         return 0;
2701 }
2702
2703 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2704 {
2705         struct afex_stats afex_stats;
2706         u32 func = BP_ABS_FUNC(bp);
2707         u32 mf_config;
2708         u16 vlan_val;
2709         u32 vlan_prio;
2710         u16 vif_id;
2711         u8 allowed_prio;
2712         u8 vlan_mode;
2713         u32 addr_to_write, vifid, addrs, stats_type, i;
2714
2715         if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2716                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2717                 DP(BNX2X_MSG_MCP,
2718                    "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2719                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2720         }
2721
2722         if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2723                 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2724                 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2725                 DP(BNX2X_MSG_MCP,
2726                    "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2727                    vifid, addrs);
2728                 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2729                                                addrs);
2730         }
2731
2732         if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2733                 addr_to_write = SHMEM2_RD(bp,
2734                         afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2735                 stats_type = SHMEM2_RD(bp,
2736                         afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2737
2738                 DP(BNX2X_MSG_MCP,
2739                    "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2740                    addr_to_write);
2741
2742                 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2743
2744                 /* write response to scratchpad, for MCP */
2745                 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2746                         REG_WR(bp, addr_to_write + i*sizeof(u32),
2747                                *(((u32 *)(&afex_stats))+i));
2748
2749                 /* send ack message to MCP */
2750                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2751         }
2752
2753         if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2754                 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2755                 bp->mf_config[BP_VN(bp)] = mf_config;
2756                 DP(BNX2X_MSG_MCP,
2757                    "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2758                    mf_config);
2759
2760                 /* if VIF_SET is "enabled" */
2761                 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2762                         /* set rate limit directly to internal RAM */
2763                         struct cmng_init_input cmng_input;
2764                         struct rate_shaping_vars_per_vn m_rs_vn;
2765                         size_t size = sizeof(struct rate_shaping_vars_per_vn);
2766                         u32 addr = BAR_XSTRORM_INTMEM +
2767                             XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2768
2769                         bp->mf_config[BP_VN(bp)] = mf_config;
2770
2771                         bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2772                         m_rs_vn.vn_counter.rate =
2773                                 cmng_input.vnic_max_rate[BP_VN(bp)];
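                             /* Editor's note: assuming vnic_max_rate is expressed in Mbps (as
                              * elsewhere in the driver), rate [Mbit/s] * RS_PERIODIC_TIMEOUT_USEC
                              * [us] gives bits per timeout period, and dividing by 8 converts the
                              * quota computed below to bytes per period.
                              */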
2774                         m_rs_vn.vn_counter.quota =
2775                                 (m_rs_vn.vn_counter.rate *
2776                                  RS_PERIODIC_TIMEOUT_USEC) / 8;
2777
2778                         __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2779
2780                         /* read relevant values from mf_cfg struct in shmem */
2781                         vif_id =
2782                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2783                                  FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2784                                 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2785                         vlan_val =
2786                                 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2787                                  FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2788                                 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2789                         vlan_prio = (mf_config &
2790                                      FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2791                                     FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2792                         vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2793                         vlan_mode =
2794                                 (MF_CFG_RD(bp,
2795                                            func_mf_config[func].afex_config) &
2796                                  FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2797                                 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2798                         allowed_prio =
2799                                 (MF_CFG_RD(bp,
2800                                            func_mf_config[func].afex_config) &
2801                                  FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2802                                 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2803
2804                         /* send ramrod to FW, return in case of failure */
2805                         if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2806                                                    allowed_prio))
2807                                 return;
2808
2809                         bp->afex_def_vlan_tag = vlan_val;
2810                         bp->afex_vlan_mode = vlan_mode;
2811                 } else {
2812                         /* notify link down because the function is disabled in mf_cfg */
2813                         bnx2x_link_report(bp);
2814
2815                         /* send INVALID VIF ramrod to FW */
2816                         bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2817
2818                         /* Reset the default afex VLAN */
2819                         bp->afex_def_vlan_tag = -1;
2820                 }
2821         }
2822 }
2823
2824 static void bnx2x_pmf_update(struct bnx2x *bp)
2825 {
2826         int port = BP_PORT(bp);
2827         u32 val;
2828
2829         bp->port.pmf = 1;
2830         DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2831
2832         /*
2833          * We need the smp_mb() to ensure the ordering between writing to
2834          * bp->port.pmf here and reading it from bnx2x_periodic_task().
2835          */
2836         smp_mb();
2837
2838         /* queue a periodic task */
2839         queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2840
2841         bnx2x_dcbx_pmf_update(bp);
2842
2843         /* enable nig attention */
2844         val = (0xff0f | (1 << (BP_VN(bp) + 4)));
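              /* Editor's illustration: for VN 0 this evaluates to 0xff0f | 0x10 = 0xff1f,
               * i.e. the per-VN attention bit (bit 4 + VN) is added to the base mask 0xff0f.
               */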
2845         if (bp->common.int_block == INT_BLOCK_HC) {
2846                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2847                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2848         } else if (!CHIP_IS_E1x(bp)) {
2849                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2850                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2851         }
2852
2853         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2854 }
2855
2856 /* end of Link */
2857
2858 /* slow path */
2859
2860 /*
2861  * General service functions
2862  */
2863
2864 /* send the MCP a request, block until there is a reply */
2865 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2866 {
2867         int mb_idx = BP_FW_MB_IDX(bp);
2868         u32 seq;
2869         u32 rc = 0;
2870         u32 cnt = 1;
2871         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2872
2873         mutex_lock(&bp->fw_mb_mutex);
2874         seq = ++bp->fw_seq;
2875         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2876         SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2877
2878         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2879                         (command | seq), param);
2880
2881         do {
2882                 /* let the FW do its magic ... */
2883                 msleep(delay);
2884
2885                 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2886
2887                 /* Give the FW up to 5 seconds (500 * 10ms) */
2888         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2889
2890         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2891            cnt*delay, rc, seq);
2892
2893         /* is this a reply to our command? */
2894         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2895                 rc &= FW_MSG_CODE_MASK;
2896         else {
2897                 /* FW BUG! */
2898                 BNX2X_ERR("FW failed to respond!\n");
2899                 bnx2x_fw_dump(bp);
2900                 rc = 0;
2901         }
2902         mutex_unlock(&bp->fw_mb_mutex);
2903
2904         return rc;
2905 }
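     /* Editor's sketch (illustrative only, not a call site from this file): callers
      * typically treat the masked return value as the firmware response code and a
      * return of 0 as "no response", e.g.:
      *
      *      u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
      *      if (!load_code)
      *              return -EBUSY;          (MCP did not answer)
      *
      * DRV_MSG_CODE_LOAD_REQ is used here purely as an example opcode.
      */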
2906
2907 static void storm_memset_func_cfg(struct bnx2x *bp,
2908                                  struct tstorm_eth_function_common_config *tcfg,
2909                                  u16 abs_fid)
2910 {
2911         size_t size = sizeof(struct tstorm_eth_function_common_config);
2912
2913         u32 addr = BAR_TSTRORM_INTMEM +
2914                         TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2915
2916         __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2917 }
2918
2919 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2920 {
2921         if (CHIP_IS_E1x(bp)) {
2922                 struct tstorm_eth_function_common_config tcfg = {0};
2923
2924                 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2925         }
2926
2927         /* Enable the function in the FW */
2928         storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2929         storm_memset_func_en(bp, p->func_id, 1);
2930
2931         /* spq */
2932         if (p->func_flgs & FUNC_FLG_SPQ) {
2933                 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2934                 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2935                        XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2936         }
2937 }
2938
2939 /**
2940  * bnx2x_get_common_flags - Return common flags
2941  *
2942  * @bp:         device handle
2943  * @fp:         queue handle
2944  * @zero_stats: TRUE if statistics zeroing is needed
2945  *
2946  * Return the flags that are common to Tx-only and regular connections.
2947  */
2948 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2949                                             struct bnx2x_fastpath *fp,
2950                                             bool zero_stats)
2951 {
2952         unsigned long flags = 0;
2953
2954         /* PF driver will always initialize the Queue to an ACTIVE state */
2955         __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2956
2957         /* tx only connections collect statistics (on the same index as the
2958          * parent connection). The statistics are zeroed when the parent
2959          * connection is initialized.
2960          */
2961
2962         __set_bit(BNX2X_Q_FLG_STATS, &flags);
2963         if (zero_stats)
2964                 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2965
2966         __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
2967         __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
2968
2969 #ifdef BNX2X_STOP_ON_ERROR
2970         __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
2971 #endif
2972
2973         return flags;
2974 }
2975
2976 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2977                                        struct bnx2x_fastpath *fp,
2978                                        bool leading)
2979 {
2980         unsigned long flags = 0;
2981
2982         /* calculate other queue flags */
2983         if (IS_MF_SD(bp))
2984                 __set_bit(BNX2X_Q_FLG_OV, &flags);
2985
2986         if (IS_FCOE_FP(fp)) {
2987                 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2988                 /* For FCoE - force usage of default priority (for afex) */
2989                 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2990         }
2991
2992         if (!fp->disable_tpa) {
2993                 __set_bit(BNX2X_Q_FLG_TPA, &flags);
2994                 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
2995                 if (fp->mode == TPA_MODE_GRO)
2996                         __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
2997         }
2998
2999         if (leading) {
3000                 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3001                 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3002         }
3003
3004         /* Always set HW VLAN stripping */
3005         __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3006
3007         /* configure silent vlan removal */
3008         if (IS_MF_AFEX(bp))
3009                 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3010
3011         return flags | bnx2x_get_common_flags(bp, fp, true);
3012 }
3013
3014 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3015         struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3016         u8 cos)
3017 {
3018         gen_init->stat_id = bnx2x_stats_id(fp);
3019         gen_init->spcl_id = fp->cl_id;
3020
3021         /* Always use mini-jumbo MTU for FCoE L2 ring */
3022         if (IS_FCOE_FP(fp))
3023                 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3024         else
3025                 gen_init->mtu = bp->dev->mtu;
3026
3027         gen_init->cos = cos;
3028 }
3029
3030 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3031         struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3032         struct bnx2x_rxq_setup_params *rxq_init)
3033 {
3034         u8 max_sge = 0;
3035         u16 sge_sz = 0;
3036         u16 tpa_agg_size = 0;
3037
3038         if (!fp->disable_tpa) {
3039                 pause->sge_th_lo = SGE_TH_LO(bp);
3040                 pause->sge_th_hi = SGE_TH_HI(bp);
3041
3042                 /* validate that the SGE ring has enough entries to cross the high threshold */
3043                 WARN_ON(bp->dropless_fc &&
3044                                 pause->sge_th_hi + FW_PREFETCH_CNT >
3045                                 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3046
3047                 tpa_agg_size = TPA_AGG_SIZE;
3048                 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3049                         SGE_PAGE_SHIFT;
3050                 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3051                           (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3052                 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3053         }
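              /* Editor's note: the computation above rounds the page count for an
               * MTU-sized frame up to a whole number of SGE elements (each spanning
               * PAGES_PER_SGE pages). As an illustration, assuming 4 KiB SGE pages,
               * a 9000-byte MTU aligns to 12 KiB, i.e. 3 pages, before the
               * PAGES_PER_SGE round-up.
               */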
3054
3055         /* pause - not for e1 */
3056         if (!CHIP_IS_E1(bp)) {
3057                 pause->bd_th_lo = BD_TH_LO(bp);
3058                 pause->bd_th_hi = BD_TH_HI(bp);
3059
3060                 pause->rcq_th_lo = RCQ_TH_LO(bp);
3061                 pause->rcq_th_hi = RCQ_TH_HI(bp);
3062                 /*
3063                  * validate that rings have enough entries to cross
3064                  * high thresholds
3065                  */
3066                 WARN_ON(bp->dropless_fc &&
3067                                 pause->bd_th_hi + FW_PREFETCH_CNT >
3068                                 bp->rx_ring_size);
3069                 WARN_ON(bp->dropless_fc &&
3070                                 pause->rcq_th_hi + FW_PREFETCH_CNT >
3071                                 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3072
3073                 pause->pri_map = 1;
3074         }
3075
3076         /* rxq setup */
3077         rxq_init->dscr_map = fp->rx_desc_mapping;
3078         rxq_init->sge_map = fp->rx_sge_mapping;
3079         rxq_init->rcq_map = fp->rx_comp_mapping;
3080         rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3081
3082         /* This should be the maximum number of data bytes that may be
3083          * placed on the BD (not including padding).
3084          */
3085         rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3086                            BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3087
3088         rxq_init->cl_qzone_id = fp->cl_qzone_id;
3089         rxq_init->tpa_agg_sz = tpa_agg_size;
3090         rxq_init->sge_buf_sz = sge_sz;
3091         rxq_init->max_sges_pkt = max_sge;
3092         rxq_init->rss_engine_id = BP_FUNC(bp);
3093         rxq_init->mcast_engine_id = BP_FUNC(bp);
3094
3095         /* Maximum number of simultaneous TPA aggregations for this queue.
3096          *
3097          * For PF clients it should be the maximum available number.
3098          * VF driver(s) may want to set it to a smaller value.
3099          */
3100         rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3101
3102         rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3103         rxq_init->fw_sb_id = fp->fw_sb_id;
3104
3105         if (IS_FCOE_FP(fp))
3106                 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3107         else
3108                 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3109         /* configure silent vlan removal
3110          * if multi function mode is afex, then mask default vlan
3111          */
3112         if (IS_MF_AFEX(bp)) {
3113                 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3114                 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3115         }
3116 }
3117
3118 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3119         struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3120         u8 cos)
3121 {
3122         txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3123         txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3124         txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3125         txq_init->fw_sb_id = fp->fw_sb_id;
3126
3127         /*
3128          * set the tss leading client id for TX classification ==
3129          * leading RSS client id
3130          */
3131         txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3132
3133         if (IS_FCOE_FP(fp)) {
3134                 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3135                 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3136         }
3137 }
3138
3139 static void bnx2x_pf_init(struct bnx2x *bp)
3140 {
3141         struct bnx2x_func_init_params func_init = {0};
3142         struct event_ring_data eq_data = { {0} };
3143         u16 flags;
3144
3145         if (!CHIP_IS_E1x(bp)) {
3146                 /* reset IGU PF statistics: MSIX + ATTN */
3147                 /* PF */
3148                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3149                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3150                            (CHIP_MODE_IS_4_PORT(bp) ?
3151                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3152                 /* ATTN */
3153                 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3154                            BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3155                            BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3156                            (CHIP_MODE_IS_4_PORT(bp) ?
3157                                 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3158         }
3159
3160         /* function setup flags */
3161         flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3162
3163         /* This flag is relevant for E1x only.
3164          * E2 doesn't have a TPA configuration at the function level.
3165          */
3166         flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
3167
3168         func_init.func_flgs = flags;
3169         func_init.pf_id = BP_FUNC(bp);
3170         func_init.func_id = BP_FUNC(bp);
3171         func_init.spq_map = bp->spq_mapping;
3172         func_init.spq_prod = bp->spq_prod_idx;
3173
3174         bnx2x_func_init(bp, &func_init);
3175
3176         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3177
3178         /*
3179          * Congestion management values depend on the link rate.
3180          * There is no active link, so the initial link rate is set to 10 Gbps.
3181          * When the link comes up, the congestion management values are
3182          * re-calculated according to the actual link rate.
3183          */
3184         bp->link_vars.line_speed = SPEED_10000;
3185         bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3186
3187         /* Only the PMF sets the HW */
3188         if (bp->port.pmf)
3189                 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3190
3191         /* init Event Queue - PCI bus guarantees correct endianness */
3192         eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3193         eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3194         eq_data.producer = bp->eq_prod;
3195         eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3196         eq_data.sb_id = DEF_SB_ID;
3197         storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3198 }
3199
3200 static void bnx2x_e1h_disable(struct bnx2x *bp)
3201 {
3202         int port = BP_PORT(bp);
3203
3204         bnx2x_tx_disable(bp);
3205
3206         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3207 }
3208
3209 static void bnx2x_e1h_enable(struct bnx2x *bp)
3210 {
3211         int port = BP_PORT(bp);
3212
3213         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3214
3215         /* Tx queues only need to be re-enabled */
3216         netif_tx_wake_all_queues(bp->dev);
3217
3218         /*
3219          * Do not call netif_carrier_on() here; it will be called when the
3220          * link state is checked and the link is found to be up
3221          */
3222 }
3223
3224 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3225
3226 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3227 {
3228         struct eth_stats_info *ether_stat =
3229                 &bp->slowpath->drv_info_to_mcp.ether_stat;
3230         struct bnx2x_vlan_mac_obj *mac_obj =
3231                 &bp->sp_objs->mac_obj;
3232         int i;
3233
3234         strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3235                 ETH_STAT_INFO_VERSION_LEN);
3236
3237         /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3238          * mac_local field of the ether_stat struct. The base address is offset
3239          * by 2 bytes because each field entry is 8 bytes while a MAC address is
3240          * only 6 bytes. Likewise, the stride passed to get_n_elements is padded
3241          * by 2 bytes to step from the 6 bytes of a MAC to the 8-byte slot
3242          * allocated by the ether_stat struct, so the MACs land in their
3243          * proper positions.
3244          */
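              /* Editor's illustration (assuming MAC_PAD is the 2-byte pad described
               * above): MAC #0 then occupies bytes 2-7 of mac_local, MAC #1 bytes
               * 10-15 and MAC #2 bytes 18-23, leaving the leading 2 bytes of each
               * 8-byte slot zero.
               */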
3245         for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3246                 memset(ether_stat->mac_local + i, 0,
3247                        sizeof(ether_stat->mac_local[0]));
3248         mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3249                                 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3250                                 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3251                                 ETH_ALEN);
3252         ether_stat->mtu_size = bp->dev->mtu;
3253         if (bp->dev->features & NETIF_F_RXCSUM)
3254                 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3255         if (bp->dev->features & NETIF_F_TSO)
3256                 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3257         ether_stat->feature_flags |= bp->common.boot_mode;
3258
3259         ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3260
3261         ether_stat->txq_size = bp->tx_ring_size;
3262         ether_stat->rxq_size = bp->rx_ring_size;
3263 }
3264
3265 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3266 {
3267         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3268         struct fcoe_stats_info *fcoe_stat =
3269                 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3270
3271         if (!CNIC_LOADED(bp))
3272                 return;
3273
3274         memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3275
3276         fcoe_stat->qos_priority =
3277                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3278
3279         /* insert FCoE stats from ramrod response */
3280         if (!NO_FCOE(bp)) {
3281                 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3282                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3283                         tstorm_queue_statistics;
3284
3285                 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3286                         &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3287                         xstorm_queue_statistics;
3288
3289                 struct fcoe_statistics_params *fw_fcoe_stat =
3290                         &bp->fw_stats_data->fcoe;
3291
3292                 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3293                           fcoe_stat->rx_bytes_lo,
3294                           fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3295
3296                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3297                           fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3298                           fcoe_stat->rx_bytes_lo,
3299                           fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3300
3301                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3302                           fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3303                           fcoe_stat->rx_bytes_lo,
3304                           fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3305
3306                 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3307                           fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3308                           fcoe_stat->rx_bytes_lo,
3309                           fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3310
3311                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3312                           fcoe_stat->rx_frames_lo,
3313                           fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3314
3315                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3316                           fcoe_stat->rx_frames_lo,
3317                           fcoe_q_tstorm_stats->rcv_ucast_pkts);
3318
3319                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3320                           fcoe_stat->rx_frames_lo,
3321                           fcoe_q_tstorm_stats->rcv_bcast_pkts);
3322
3323                 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3324                           fcoe_stat->rx_frames_lo,
3325                           fcoe_q_tstorm_stats->rcv_mcast_pkts);
3326
3327                 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3328                           fcoe_stat->tx_bytes_lo,
3329                           fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3330
3331                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3332                           fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3333                           fcoe_stat->tx_bytes_lo,
3334                           fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3335
3336                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3337                           fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3338                           fcoe_stat->tx_bytes_lo,
3339                           fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3340
3341                 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3342                           fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3343                           fcoe_stat->tx_bytes_lo,
3344                           fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3345
3346                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3347                           fcoe_stat->tx_frames_lo,
3348                           fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3349
3350                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3351                           fcoe_stat->tx_frames_lo,
3352                           fcoe_q_xstorm_stats->ucast_pkts_sent);
3353
3354                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3355                           fcoe_stat->tx_frames_lo,
3356                           fcoe_q_xstorm_stats->bcast_pkts_sent);
3357
3358                 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3359                           fcoe_stat->tx_frames_lo,
3360                           fcoe_q_xstorm_stats->mcast_pkts_sent);
3361         }
3362
3363         /* ask L5 driver to add data to the struct */
3364         bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3365 }
3366
3367 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3368 {
3369         struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3370         struct iscsi_stats_info *iscsi_stat =
3371                 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3372
3373         if (!CNIC_LOADED(bp))
3374                 return;
3375
3376         memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3377                ETH_ALEN);
3378
3379         iscsi_stat->qos_priority =
3380                 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3381
3382         /* ask L5 driver to add data to the struct */
3383         bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3384 }
3385
3386 /* called due to MCP event (on pmf):
3387  *      reread new bandwidth configuration
3388  *      configure FW
3389  *      notify other functions about the change
3390  */
3391 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3392 {
3393         if (bp->link_vars.link_up) {
3394                 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3395                 bnx2x_link_sync_notify(bp);
3396         }
3397         storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3398 }
3399
3400 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3401 {
3402         bnx2x_config_mf_bw(bp);
3403         bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3404 }
3405
3406 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3407 {
3408         DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3409         bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3410 }
3411
3412 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3413 {
3414         enum drv_info_opcode op_code;
3415         u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3416
3417         /* if drv_info version supported by MFW doesn't match - send NACK */
3418         if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3419                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3420                 return;
3421         }
3422
3423         op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3424                   DRV_INFO_CONTROL_OP_CODE_SHIFT;
3425
3426         memset(&bp->slowpath->drv_info_to_mcp, 0,
3427                sizeof(union drv_info_to_mcp));
3428
3429         switch (op_code) {
3430         case ETH_STATS_OPCODE:
3431                 bnx2x_drv_info_ether_stat(bp);
3432                 break;
3433         case FCOE_STATS_OPCODE:
3434                 bnx2x_drv_info_fcoe_stat(bp);
3435                 break;
3436         case ISCSI_STATS_OPCODE:
3437                 bnx2x_drv_info_iscsi_stat(bp);
3438                 break;
3439         default:
3440                 /* if op code isn't supported - send NACK */
3441                 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3442                 return;
3443         }
3444
3445         /* if we got drv_info attn from MFW then these fields are defined in
3446          * shmem2 for sure
3447          */
3448         SHMEM2_WR(bp, drv_info_host_addr_lo,
3449                 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3450         SHMEM2_WR(bp, drv_info_host_addr_hi,
3451                 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3452
3453         bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3454 }
3455
3456 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3457 {
3458         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3459
3460         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3461
3462                 /*
3463                  * This is the only place besides the function initialization
3464                  * where bp->flags can change, so it is done without any
3465                  * locks
3466                  */
3467                 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3468                         DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3469                         bp->flags |= MF_FUNC_DIS;
3470
3471                         bnx2x_e1h_disable(bp);
3472                 } else {
3473                         DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3474                         bp->flags &= ~MF_FUNC_DIS;
3475
3476                         bnx2x_e1h_enable(bp);
3477                 }
3478                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3479         }
3480         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
3481                 bnx2x_config_mf_bw(bp);
3482                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3483         }
3484
3485         /* Report results to MCP */
3486         if (dcc_event)
3487                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3488         else
3489                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3490 }
3491
3492 /* must be called under the spq lock */
3493 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3494 {
3495         struct eth_spe *next_spe = bp->spq_prod_bd;
3496
3497         if (bp->spq_prod_bd == bp->spq_last_bd) {
3498                 bp->spq_prod_bd = bp->spq;
3499                 bp->spq_prod_idx = 0;
3500                 DP(BNX2X_MSG_SP, "end of spq\n");
3501         } else {
3502                 bp->spq_prod_bd++;
3503                 bp->spq_prod_idx++;
3504         }
3505         return next_spe;
3506 }
3507
3508 /* must be called under the spq lock */
3509 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3510 {
3511         int func = BP_FUNC(bp);
3512
3513         /*
3514          * Make sure that BD data is updated before writing the producer:
3515          * BD data is written to the memory, the producer is read from the
3516          * memory, thus we need a full memory barrier to ensure the ordering.
3517          */
3518         mb();
3519
3520         REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3521                  bp->spq_prod_idx);
3522         mmiowb();
3523 }
3524
3525 /**
3526  * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3527  *
3528  * @cmd:        command to check
3529  * @cmd_type:   command type
3530  */
3531 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3532 {
3533         if ((cmd_type == NONE_CONNECTION_TYPE) ||
3534             (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3535             (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3536             (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3537             (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3538             (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3539             (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3540                 return true;
3541         else
3542                 return false;
3543 }
3544
3545 /**
3546  * bnx2x_sp_post - place a single command on an SP ring
3547  *
3548  * @bp:         driver handle
3549  * @command:    command to place (e.g. SETUP, FILTER_RULES, etc.)
3550  * @cid:        SW CID the command is related to
3551  * @data_hi:    command private data address (high 32 bits)
3552  * @data_lo:    command private data address (low 32 bits)
3553  * @cmd_type:   command type (e.g. NONE, ETH)
3554  *
3555  * SP data is handled as if it's always an address pair, thus data fields are
3556  * not swapped to little endian in upper functions. Instead this function swaps
3557  * data as if it's two u32 fields.
3558  */
3559 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3560                   u32 data_hi, u32 data_lo, int cmd_type)
3561 {
3562         struct eth_spe *spe;
3563         u16 type;
3564         bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3565
3566 #ifdef BNX2X_STOP_ON_ERROR
3567         if (unlikely(bp->panic)) {
3568                 BNX2X_ERR("Can't post SP when there is panic\n");
3569                 return -EIO;
3570         }
3571 #endif
3572
3573         spin_lock_bh(&bp->spq_lock);
3574
3575         if (common) {
3576                 if (!atomic_read(&bp->eq_spq_left)) {
3577                         BNX2X_ERR("BUG! EQ ring full!\n");
3578                         spin_unlock_bh(&bp->spq_lock);
3579                         bnx2x_panic();
3580                         return -EBUSY;
3581                 }
3582         } else if (!atomic_read(&bp->cq_spq_left)) {
3583                         BNX2X_ERR("BUG! SPQ ring full!\n");
3584                         spin_unlock_bh(&bp->spq_lock);
3585                         bnx2x_panic();
3586                         return -EBUSY;
3587         }
3588
3589         spe = bnx2x_sp_get_next(bp);
3590
3591         /* CID needs the port number to be encoded in it */
3592         spe->hdr.conn_and_cmd_data =
3593                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3594                                     HW_CID(bp, cid));
3595
3596         type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
3597
3598         type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3599                  SPE_HDR_FUNCTION_ID);
3600
3601         spe->hdr.type = cpu_to_le16(type);
3602
3603         spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3604         spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3605
3606         /*
3607          * It's ok if the actual decrement is issued towards the memory
3608          * somewhere between the spin_lock and spin_unlock. Thus no
3609          * more explicit memory barrier is needed.
3610          */
3611         if (common)
3612                 atomic_dec(&bp->eq_spq_left);
3613         else
3614                 atomic_dec(&bp->cq_spq_left);
3615
3616         DP(BNX2X_MSG_SP,
3617            "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3618            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3619            (u32)(U64_LO(bp->spq_mapping) +
3620            (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3621            HW_CID(bp, cid), data_hi, data_lo, type,
3622            atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3623
3624         bnx2x_sp_prod_update(bp);
3625         spin_unlock_bh(&bp->spq_lock);
3626         return 0;
3627 }
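     /* Editor's sketch (illustrative only, not a call site from this file): a caller
      * usually passes the high/low halves of a DMA-mapped parameter buffer, e.g.:
      *
      *      rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, cid,
      *                         U64_HI(data_mapping), U64_LO(data_mapping),
      *                         ETH_CONNECTION_TYPE);
      *
      * where data_mapping is the dma_addr_t of the command data and cid is the
      * software connection id; RAMROD_CMD_ID_ETH_SET_MAC and ETH_CONNECTION_TYPE
      * are used here only as example values.
      */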
3628
3629 /* acquire split MCP access lock register */
3630 static int bnx2x_acquire_alr(struct bnx2x *bp)
3631 {
3632         u32 j, val;
3633         int rc = 0;
3634
3635         might_sleep();
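              /* Editor's note: with 1000 iterations of a 5-10 ms sleep, the loop
               * below polls for the lock for roughly 5-10 seconds before giving up.
               */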
3636         for (j = 0; j < 1000; j++) {
3637                 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3638                 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3639                 if (val & MCPR_ACCESS_LOCK_LOCK)
3640                         break;
3641
3642                 usleep_range(5000, 10000);
3643         }
3644         if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3645                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3646                 rc = -EBUSY;
3647         }
3648
3649         return rc;
3650 }
3651
3652 /* release split MCP access lock register */
3653 static void bnx2x_release_alr(struct bnx2x *bp)
3654 {
3655         REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3656 }
3657
3658 #define BNX2X_DEF_SB_ATT_IDX    0x0001
3659 #define BNX2X_DEF_SB_IDX        0x0002
3660
3661 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3662 {
3663         struct host_sp_status_block *def_sb = bp->def_status_blk;
3664         u16 rc = 0;
3665
3666         barrier(); /* status block is written to by the chip */
3667         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3668                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3669                 rc |= BNX2X_DEF_SB_ATT_IDX;
3670         }
3671
3672         if (bp->def_idx != def_sb->sp_sb.running_index) {
3673                 bp->def_idx = def_sb->sp_sb.running_index;
3674                 rc |= BNX2X_DEF_SB_IDX;
3675         }
3676
3677         /* Do not reorder: the index reads must complete before handling */
3678         barrier();
3679         return rc;
3680 }
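     /* Editor's note: the caller (the slow-path task elsewhere in this file)
      * typically tests the returned mask against BNX2X_DEF_SB_ATT_IDX and
      * BNX2X_DEF_SB_IDX to decide whether attentions and/or the event queue
      * need servicing.
      */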
3681
3682 /*
3683  * slow path service functions
3684  */
3685
3686 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3687 {
3688         int port = BP_PORT(bp);
3689         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3690                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
3691         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3692                                        NIG_REG_MASK_INTERRUPT_PORT0;
3693         u32 aeu_mask;
3694         u32 nig_mask = 0;
3695         u32 reg_addr;
3696
3697         if (bp->attn_state & asserted)
3698                 BNX2X_ERR("IGU ERROR\n");
3699
3700         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3701         aeu_mask = REG_RD(bp, aeu_addr);
3702
3703         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
3704            aeu_mask, asserted);
3705         aeu_mask &= ~(asserted & 0x3ff);
3706         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3707
3708         REG_WR(bp, aeu_addr, aeu_mask);
3709         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3710
3711         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3712         bp->attn_state |= asserted;
3713         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3714
3715         if (asserted & ATTN_HARD_WIRED_MASK) {
3716                 if (asserted & ATTN_NIG_FOR_FUNC) {
3717
3718                         bnx2x_acquire_phy_lock(bp);
3719
3720                         /* save nig interrupt mask */
3721                         nig_mask = REG_RD(bp, nig_int_mask_addr);
3722
3723                         /* If nig_mask is not set, no need to call the update
3724                          * function.
3725                          */
3726                         if (nig_mask) {
3727                                 REG_WR(bp, nig_int_mask_addr, 0);
3728
3729                                 bnx2x_link_attn(bp);
3730                         }
3731
3732                         /* handle unicore attn? */
3733                 }
3734                 if (asserted & ATTN_SW_TIMER_4_FUNC)
3735                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3736
3737                 if (asserted & GPIO_2_FUNC)
3738                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3739
3740                 if (asserted & GPIO_3_FUNC)
3741                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3742
3743                 if (asserted & GPIO_4_FUNC)
3744                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3745
3746                 if (port == 0) {
3747                         if (asserted & ATTN_GENERAL_ATTN_1) {
3748                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3749                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3750                         }
3751                         if (asserted & ATTN_GENERAL_ATTN_2) {
3752                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3753                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3754                         }
3755                         if (asserted & ATTN_GENERAL_ATTN_3) {
3756                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3757                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3758                         }
3759                 } else {
3760                         if (asserted & ATTN_GENERAL_ATTN_4) {
3761                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3762                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3763                         }
3764                         if (asserted & ATTN_GENERAL_ATTN_5) {
3765                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3766                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3767                         }
3768                         if (asserted & ATTN_GENERAL_ATTN_6) {
3769                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3770                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3771                         }
3772                 }
3773
3774         } /* if hardwired */
3775
3776         if (bp->common.int_block == INT_BLOCK_HC)
3777                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3778                             COMMAND_REG_ATTN_BITS_SET);
3779         else
3780                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3781
3782         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3783            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3784         REG_WR(bp, reg_addr, asserted);
3785
3786         /* now set back the mask */
3787         if (asserted & ATTN_NIG_FOR_FUNC) {
3788                 /* Verify that IGU ack through BAR was written before restoring
3789                  * NIG mask. This loop should exit after 2-3 iterations max.
3790                  */
3791                 if (bp->common.int_block != INT_BLOCK_HC) {
3792                         u32 cnt = 0, igu_acked;
3793                         do {
3794                                 igu_acked = REG_RD(bp,
3795                                                    IGU_REG_ATTENTION_ACK_BITS);
3796                         } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3797                                  (++cnt < MAX_IGU_ATTN_ACK_TO));
3798                         if (!igu_acked)
3799                                 DP(NETIF_MSG_HW,
3800                                    "Failed to verify IGU ack on time\n");
3801                         barrier();
3802                 }
3803                 REG_WR(bp, nig_int_mask_addr, nig_mask);
3804                 bnx2x_release_phy_lock(bp);
3805         }
3806 }
3807
3808 static void bnx2x_fan_failure(struct bnx2x *bp)
3809 {
3810         int port = BP_PORT(bp);
3811         u32 ext_phy_config;
3812         /* mark the failure */
3813         ext_phy_config =
3814                 SHMEM_RD(bp,
3815                          dev_info.port_hw_config[port].external_phy_config);
3816
3817         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3818         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
3819         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
3820                  ext_phy_config);
3821
3822         /* log the failure */
3823         netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shut down the card to prevent permanent damage.\n"
3824                             "Please contact OEM Support for assistance\n");
3825
3826         /* Schedule device reset (unload)
3827          * Some boards consume enough power when the driver is up to overheat
3828          * if the fan fails.
3829          */
3830         smp_mb__before_clear_bit();
3831         set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3832         smp_mb__after_clear_bit();
3833         schedule_delayed_work(&bp->sp_rtnl_task, 0);
3834 }
3835
3836 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3837 {
3838         int port = BP_PORT(bp);
3839         int reg_offset;
3840         u32 val;
3841
3842         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3843                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3844
3845         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
3846
3847                 val = REG_RD(bp, reg_offset);
3848                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3849                 REG_WR(bp, reg_offset, val);
3850
3851                 BNX2X_ERR("SPIO5 hw attention\n");
3852
3853                 /* Fan failure attention */
3854                 bnx2x_hw_reset_phy(&bp->link_params);
3855                 bnx2x_fan_failure(bp);
3856         }
3857
3858         if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
3859                 bnx2x_acquire_phy_lock(bp);
3860                 bnx2x_handle_module_detect_int(&bp->link_params);
3861                 bnx2x_release_phy_lock(bp);
3862         }
3863
3864         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3865
3866                 val = REG_RD(bp, reg_offset);
3867                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3868                 REG_WR(bp, reg_offset, val);
3869
3870                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3871                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3872                 bnx2x_panic();
3873         }
3874 }
3875
3876 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3877 {
3878         u32 val;
3879
3880         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3881
3882                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3883                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3884                 /* DORQ discard attention */
3885                 if (val & 0x2)
3886                         BNX2X_ERR("FATAL error from DORQ\n");
3887         }
3888
3889         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3890
3891                 int port = BP_PORT(bp);
3892                 int reg_offset;
3893
3894                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3895                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3896
3897                 val = REG_RD(bp, reg_offset);
3898                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3899                 REG_WR(bp, reg_offset, val);
3900
3901                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3902                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3903                 bnx2x_panic();
3904         }
3905 }
3906
3907 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3908 {
3909         u32 val;
3910
3911         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3912
3913                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3914                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3915                 /* CFC error attention */
3916                 if (val & 0x2)
3917                         BNX2X_ERR("FATAL error from CFC\n");
3918         }
3919
3920         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3921                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3922                 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
3923                 /* RQ_USDMDP_FIFO_OVERFLOW */
3924                 if (val & 0x18000)
3925                         BNX2X_ERR("FATAL error from PXP\n");
3926
3927                 if (!CHIP_IS_E1x(bp)) {
3928                         val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3929                         BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3930                 }
3931         }
3932
3933         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3934
3935                 int port = BP_PORT(bp);
3936                 int reg_offset;
3937
3938                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3939                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3940
3941                 val = REG_RD(bp, reg_offset);
3942                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3943                 REG_WR(bp, reg_offset, val);
3944
3945                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3946                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3947                 bnx2x_panic();
3948         }
3949 }
3950
3951 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3952 {
3953         u32 val;
3954
3955         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3956
3957                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3958                         int func = BP_FUNC(bp);
3959
3960                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3961                         bnx2x_read_mf_cfg(bp);
3962                         bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3963                                         func_mf_config[BP_ABS_FUNC(bp)].config);
3964                         val = SHMEM_RD(bp,
3965                                        func_mb[BP_FW_MB_IDX(bp)].drv_status);
3966                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3967                                 bnx2x_dcc_event(bp,
3968                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3969
3970                         if (val & DRV_STATUS_SET_MF_BW)
3971                                 bnx2x_set_mf_bw(bp);
3972
3973                         if (val & DRV_STATUS_DRV_INFO_REQ)
3974                                 bnx2x_handle_drv_info_req(bp);
3975
3976                         if (val & DRV_STATUS_VF_DISABLED)
3977                                 bnx2x_vf_handle_flr_event(bp);
3978
3979                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3980                                 bnx2x_pmf_update(bp);
3981
3982                         if (bp->port.pmf &&
3983                             (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3984                                 bp->dcbx_enabled > 0)
3985                                 /* start dcbx state machine */
3986                                 bnx2x_dcbx_set_params(bp,
3987                                         BNX2X_DCBX_STATE_NEG_RECEIVED);
3988                         if (val & DRV_STATUS_AFEX_EVENT_MASK)
3989                                 bnx2x_handle_afex_cmd(bp,
3990                                         val & DRV_STATUS_AFEX_EVENT_MASK);
3991                         if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3992                                 bnx2x_handle_eee_event(bp);
3993                         if (bp->link_vars.periodic_flags &
3994                             PERIODIC_FLAGS_LINK_EVENT) {
3995                                 /*  sync with link */
3996                                 bnx2x_acquire_phy_lock(bp);
3997                                 bp->link_vars.periodic_flags &=
3998                                         ~PERIODIC_FLAGS_LINK_EVENT;
3999                                 bnx2x_release_phy_lock(bp);
4000                                 if (IS_MF(bp))
4001                                         bnx2x_link_sync_notify(bp);
4002                                 bnx2x_link_report(bp);
4003                         }
4004                         /* Always call it here: bnx2x_link_report() will
4005                          * prevent the link indication duplication.
4006                          */
4007                         bnx2x__link_status_update(bp);
4008                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4009
4010                         BNX2X_ERR("MC assert!\n");
4011                         bnx2x_mc_assert(bp);
4012                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4013                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4014                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4015                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4016                         bnx2x_panic();
4017
4018                 } else if (attn & BNX2X_MCP_ASSERT) {
4019
4020                         BNX2X_ERR("MCP assert!\n");
4021                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4022                         bnx2x_fw_dump(bp);
4023
4024                 } else
4025                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4026         }
4027
4028         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4029                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4030                 if (attn & BNX2X_GRC_TIMEOUT) {
4031                         val = CHIP_IS_E1(bp) ? 0 :
4032                                         REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4033                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
4034                 }
4035                 if (attn & BNX2X_GRC_RSV) {
4036                         val = CHIP_IS_E1(bp) ? 0 :
4037                                         REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4038                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
4039                 }
4040                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4041         }
4042 }
4043
4044 /*
4045  * Bits map:
4046  * 0-7   - Engine0 load counter.
4047  * 8-15  - Engine1 load counter.
4048  * 16    - Engine0 RESET_IN_PROGRESS bit.
4049  * 17    - Engine1 RESET_IN_PROGRESS bit.
4050  * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4051  *         on the engine
4052  * 19    - Engine1 ONE_IS_LOADED.
4053  * 20    - Chip reset flow bit. When set, a non-leader must wait for both engines'
4054  *         leaders to complete (i.e. check both RESET_IN_PROGRESS bits, not just
4055  *         the one belonging to its own engine).
4056  *
4057  */
4058 #define BNX2X_RECOVERY_GLOB_REG         MISC_REG_GENERIC_POR_1
4059
4060 #define BNX2X_PATH0_LOAD_CNT_MASK       0x000000ff
4061 #define BNX2X_PATH0_LOAD_CNT_SHIFT      0
4062 #define BNX2X_PATH1_LOAD_CNT_MASK       0x0000ff00
4063 #define BNX2X_PATH1_LOAD_CNT_SHIFT      8
4064 #define BNX2X_PATH0_RST_IN_PROG_BIT     0x00010000
4065 #define BNX2X_PATH1_RST_IN_PROG_BIT     0x00020000
4066 #define BNX2X_GLOBAL_RESET_BIT          0x00040000
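     /* Editor's illustration: each PF marks itself loaded by setting its own bit
      * inside its path's 8-bit load-counter field, e.g. (hypothetical helper,
      * mirroring bnx2x_set_pf_load() below):
      *
      *      static bool path0_pf_loaded(u32 reg_val, int pf_num)
      *      {
      *              u32 cnt = (reg_val & BNX2X_PATH0_LOAD_CNT_MASK) >>
      *                        BNX2X_PATH0_LOAD_CNT_SHIFT;
      *              return cnt & (1 << pf_num);
      *      }
      */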
4067
4068 /*
4069  * Set the GLOBAL_RESET bit.
4070  *
4071  * Should be run under rtnl lock
4072  */
4073 void bnx2x_set_reset_global(struct bnx2x *bp)
4074 {
4075         u32 val;
4076         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4077         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4078         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4079         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4080 }
4081
4082 /*
4083  * Clear the GLOBAL_RESET bit.
4084  *
4085  * Should be run under rtnl lock
4086  */
4087 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4088 {
4089         u32 val;
4090         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4091         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4092         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4093         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4094 }
4095
4096 /*
4097  * Checks the GLOBAL_RESET bit.
4098  *
4099  * should be run under rtnl lock
4100  */
4101 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4102 {
4103         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4104
4105         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4106         return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4107 }
4108
4109 /*
4110  * Clear RESET_IN_PROGRESS bit for the current engine.
4111  *
4112  * Should be run under rtnl lock
4113  */
4114 static void bnx2x_set_reset_done(struct bnx2x *bp)
4115 {
4116         u32 val;
4117         u32 bit = BP_PATH(bp) ?
4118                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4119         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4120         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4121
4122         /* Clear the bit */
4123         val &= ~bit;
4124         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4125
4126         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4127 }
4128
4129 /*
4130  * Set RESET_IN_PROGRESS for the current engine.
4131  *
4132  * should be run under rtnl lock
4133  */
4134 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4135 {
4136         u32 val;
4137         u32 bit = BP_PATH(bp) ?
4138                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4139         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4140         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4141
4142         /* Set the bit */
4143         val |= bit;
4144         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4145         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4146 }
4147
4148 /*
4149  * Checks the RESET_IN_PROGRESS bit for the given engine.
4150  * should be run under rtnl lock
4151  */
4152 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4153 {
4154         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4155         u32 bit = engine ?
4156                 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4157
4158         /* return false if bit is set */
4159         return (val & bit) ? false : true;
4160 }
4161
4162 /*
4163  * set pf load for the current pf.
4164  *
4165  * should be run under rtnl lock
4166  */
4167 void bnx2x_set_pf_load(struct bnx2x *bp)
4168 {
4169         u32 val1, val;
4170         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4171                              BNX2X_PATH0_LOAD_CNT_MASK;
4172         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4173                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4174
4175         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4176         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4177
4178         DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4179
4180         /* get the current counter value */
4181         val1 = (val & mask) >> shift;
4182
4183         /* set bit of that PF */
4184         val1 |= (1 << bp->pf_num);
4185
4186         /* clear the old value */
4187         val &= ~mask;
4188
4189         /* set the new one */
4190         val |= ((val1 << shift) & mask);
4191
4192         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4193         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4194 }
4195
4196 /**
4197  * bnx2x_clear_pf_load - clear pf load mark
4198  *
4199  * @bp:         driver handle
4200  *
4201  * Should be run under rtnl lock.
4202  * Decrements the load counter for the current engine. Returns
4203  * whether other functions are still loaded
4204  */
4205 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4206 {
4207         u32 val1, val;
4208         u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4209                              BNX2X_PATH0_LOAD_CNT_MASK;
4210         u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4211                              BNX2X_PATH0_LOAD_CNT_SHIFT;
4212
4213         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4214         val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4215         DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4216
4217         /* get the current counter value */
4218         val1 = (val & mask) >> shift;
4219
4220         /* clear bit of that PF */
4221         val1 &= ~(1 << bp->pf_num);
4222
4223         /* clear the old value */
4224         val &= ~mask;
4225
4226         /* set the new one */
4227         val |= ((val1 << shift) & mask);
4228
4229         REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4230         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4231         return val1 != 0;
4232 }
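/* Editorial note (not part of the driver): bnx2x_set_pf_load() and
 * bnx2x_clear_pf_load() bracket a PF's load and unload.  Per the comment
 * above, bnx2x_clear_pf_load() returns true while other functions remain
 * loaded on this engine, so a caller can use "!bnx2x_clear_pf_load(bp)" to
 * detect that it was the last loaded function before doing any engine-wide
 * teardown.
 */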
4233
4234 /*
4235  * Read the load status for the current engine.
4236  *
4237  * should be run under rtnl lock
4238  */
4239 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4240 {
4241         u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4242                              BNX2X_PATH0_LOAD_CNT_MASK);
4243         u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4244                              BNX2X_PATH0_LOAD_CNT_SHIFT);
4245         u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4246
4247         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4248
4249         val = (val & mask) >> shift;
4250
4251         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4252            engine, val);
4253
4254         return val != 0;
4255 }
4256
4257 static void _print_parity(struct bnx2x *bp, u32 reg)
4258 {
4259         pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4260 }
4261
4262 static void _print_next_block(int idx, const char *blk)
4263 {
4264         pr_cont("%s%s", idx ? ", " : "", blk);
4265 }
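/* Editorial sketch (not part of the driver): _print_next_block() and
 * _print_parity() are continuation printers; together with the netdev_err()
 * issued from bnx2x_parity_attn() below they build a single log line,
 * assumed to look roughly like:
 *
 *      <dev>: Parity errors detected in blocks: BRB [0x00000001] , TSDM [0x00000010]
 *
 * i.e. a comma-separated list of blocks, each followed by its raw parity
 * status register value.
 */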
4266
4267 static int bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4268                                             int par_num, bool print)
4269 {
4270         int i = 0;
4271         u32 cur_bit = 0;
4272         for (i = 0; sig; i++) {
4273                 cur_bit = ((u32)0x1 << i);
4274                 if (sig & cur_bit) {
4275                         switch (cur_bit) {
4276                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4277                                 if (print) {
4278                                         _print_next_block(par_num++, "BRB");
4279                                         _print_parity(bp,
4280                                                       BRB1_REG_BRB1_PRTY_STS);
4281                                 }
4282                                 break;
4283                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4284                                 if (print) {
4285                                         _print_next_block(par_num++, "PARSER");
4286                                         _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4287                                 }
4288                                 break;
4289                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4290                                 if (print) {
4291                                         _print_next_block(par_num++, "TSDM");
4292                                         _print_parity(bp,
4293                                                       TSDM_REG_TSDM_PRTY_STS);
4294                                 }
4295                                 break;
4296                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4297                                 if (print) {
4298                                         _print_next_block(par_num++,
4299                                                           "SEARCHER");
4300                                         _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4301                                 }
4302                                 break;
4303                         case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4304                                 if (print) {
4305                                         _print_next_block(par_num++, "TCM");
4306                                         _print_parity(bp,
4307                                                       TCM_REG_TCM_PRTY_STS);
4308                                 }
4309                                 break;
4310                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4311                                 if (print) {
4312                                         _print_next_block(par_num++, "TSEMI");
4313                                         _print_parity(bp,
4314                                                       TSEM_REG_TSEM_PRTY_STS_0);
4315                                         _print_parity(bp,
4316                                                       TSEM_REG_TSEM_PRTY_STS_1);
4317                                 }
4318                                 break;
4319                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4320                                 if (print) {
4321                                         _print_next_block(par_num++, "XPB");
4322                                         _print_parity(bp, GRCBASE_XPB +
4323                                                           PB_REG_PB_PRTY_STS);
4324                                 }
4325                                 break;
4326                         }
4327
4328                         /* Clear the bit */
4329                         sig &= ~cur_bit;
4330                 }
4331         }
4332
4333         return par_num;
4334 }
4335
4336 static int bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4337                                             int par_num, bool *global,
4338                                             bool print)
4339 {
4340         int i = 0;
4341         u32 cur_bit = 0;
4342         for (i = 0; sig; i++) {
4343                 cur_bit = ((u32)0x1 << i);
4344                 if (sig & cur_bit) {
4345                         switch (cur_bit) {
4346                         case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4347                                 if (print) {
4348                                         _print_next_block(par_num++, "PBF");
4349                                         _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4350                                 }
4351                                 break;
4352                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4353                                 if (print) {
4354                                         _print_next_block(par_num++, "QM");
4355                                         _print_parity(bp, QM_REG_QM_PRTY_STS);
4356                                 }
4357                                 break;
4358                         case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4359                                 if (print) {
4360                                         _print_next_block(par_num++, "TM");
4361                                         _print_parity(bp, TM_REG_TM_PRTY_STS);
4362                                 }
4363                                 break;
4364                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4365                                 if (print) {
4366                                         _print_next_block(par_num++, "XSDM");
4367                                         _print_parity(bp,
4368                                                       XSDM_REG_XSDM_PRTY_STS);
4369                                 }
4370                                 break;
4371                         case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4372                                 if (print) {
4373                                         _print_next_block(par_num++, "XCM");
4374                                         _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4375                                 }
4376                                 break;
4377                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4378                                 if (print) {
4379                                         _print_next_block(par_num++, "XSEMI");
4380                                         _print_parity(bp,
4381                                                       XSEM_REG_XSEM_PRTY_STS_0);
4382                                         _print_parity(bp,
4383                                                       XSEM_REG_XSEM_PRTY_STS_1);
4384                                 }
4385                                 break;
4386                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4387                                 if (print) {
4388                                         _print_next_block(par_num++,
4389                                                           "DOORBELLQ");
4390                                         _print_parity(bp,
4391                                                       DORQ_REG_DORQ_PRTY_STS);
4392                                 }
4393                                 break;
4394                         case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4395                                 if (print) {
4396                                         _print_next_block(par_num++, "NIG");
4397                                         if (CHIP_IS_E1x(bp)) {
4398                                                 _print_parity(bp,
4399                                                         NIG_REG_NIG_PRTY_STS);
4400                                         } else {
4401                                                 _print_parity(bp,
4402                                                         NIG_REG_NIG_PRTY_STS_0);
4403                                                 _print_parity(bp,
4404                                                         NIG_REG_NIG_PRTY_STS_1);
4405                                         }
4406                                 }
4407                                 break;
4408                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4409                                 if (print)
4410                                         _print_next_block(par_num++,
4411                                                           "VAUX PCI CORE");
4412                                 *global = true;
4413                                 break;
4414                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4415                                 if (print) {
4416                                         _print_next_block(par_num++, "DEBUG");
4417                                         _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4418                                 }
4419                                 break;
4420                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4421                                 if (print) {
4422                                         _print_next_block(par_num++, "USDM");
4423                                         _print_parity(bp,
4424                                                       USDM_REG_USDM_PRTY_STS);
4425                                 }
4426                                 break;
4427                         case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4428                                 if (print) {
4429                                         _print_next_block(par_num++, "UCM");
4430                                         _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4431                                 }
4432                                 break;
4433                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4434                                 if (print) {
4435                                         _print_next_block(par_num++, "USEMI");
4436                                         _print_parity(bp,
4437                                                       USEM_REG_USEM_PRTY_STS_0);
4438                                         _print_parity(bp,
4439                                                       USEM_REG_USEM_PRTY_STS_1);
4440                                 }
4441                                 break;
4442                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4443                                 if (print) {
4444                                         _print_next_block(par_num++, "UPB");
4445                                         _print_parity(bp, GRCBASE_UPB +
4446                                                           PB_REG_PB_PRTY_STS);
4447                                 }
4448                                 break;
4449                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4450                                 if (print) {
4451                                         _print_next_block(par_num++, "CSDM");
4452                                         _print_parity(bp,
4453                                                       CSDM_REG_CSDM_PRTY_STS);
4454                                 }
4455                                 break;
4456                         case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4457                                 if (print) {
4458                                         _print_next_block(par_num++, "CCM");
4459                                         _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4460                                 }
4461                                 break;
4462                         }
4463
4464                         /* Clear the bit */
4465                         sig &= ~cur_bit;
4466                 }
4467         }
4468
4469         return par_num;
4470 }
4471
4472 static int bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4473                                             int par_num, bool print)
4474 {
4475         int i = 0;
4476         u32 cur_bit = 0;
4477         for (i = 0; sig; i++) {
4478                 cur_bit = ((u32)0x1 << i);
4479                 if (sig & cur_bit) {
4480                         switch (cur_bit) {
4481                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4482                                 if (print) {
4483                                         _print_next_block(par_num++, "CSEMI");
4484                                         _print_parity(bp,
4485                                                       CSEM_REG_CSEM_PRTY_STS_0);
4486                                         _print_parity(bp,
4487                                                       CSEM_REG_CSEM_PRTY_STS_1);
4488                                 }
4489                                 break;
4490                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4491                                 if (print) {
4492                                         _print_next_block(par_num++, "PXP");
4493                                         _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4494                                         _print_parity(bp,
4495                                                       PXP2_REG_PXP2_PRTY_STS_0);
4496                                         _print_parity(bp,
4497                                                       PXP2_REG_PXP2_PRTY_STS_1);
4498                                 }
4499                                 break;
4500                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4501                                 if (print)
4502                                         _print_next_block(par_num++,
4503                                         "PXPPCICLOCKCLIENT");
4504                                 break;
4505                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4506                                 if (print) {
4507                                         _print_next_block(par_num++, "CFC");
4508                                         _print_parity(bp,
4509                                                       CFC_REG_CFC_PRTY_STS);
4510                                 }
4511                                 break;
4512                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4513                                 if (print) {
4514                                         _print_next_block(par_num++, "CDU");
4515                                         _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4516                                 }
4517                                 break;
4518                         case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4519                                 if (print) {
4520                                         _print_next_block(par_num++, "DMAE");
4521                                         _print_parity(bp,
4522                                                       DMAE_REG_DMAE_PRTY_STS);
4523                                 }
4524                                 break;
4525                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4526                                 if (print) {
4527                                         _print_next_block(par_num++, "IGU");
4528                                         if (CHIP_IS_E1x(bp))
4529                                                 _print_parity(bp,
4530                                                         HC_REG_HC_PRTY_STS);
4531                                         else
4532                                                 _print_parity(bp,
4533                                                         IGU_REG_IGU_PRTY_STS);
4534                                 }
4535                                 break;
4536                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4537                                 if (print) {
4538                                         _print_next_block(par_num++, "MISC");
4539                                         _print_parity(bp,
4540                                                       MISC_REG_MISC_PRTY_STS);
4541                                 }
4542                                 break;
4543                         }
4544
4545                         /* Clear the bit */
4546                         sig &= ~cur_bit;
4547                 }
4548         }
4549
4550         return par_num;
4551 }
4552
4553 static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4554                                            bool *global, bool print)
4555 {
4556         int i = 0;
4557         u32 cur_bit = 0;
4558         for (i = 0; sig; i++) {
4559                 cur_bit = ((u32)0x1 << i);
4560                 if (sig & cur_bit) {
4561                         switch (cur_bit) {
4562                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4563                                 if (print)
4564                                         _print_next_block(par_num++, "MCP ROM");
4565                                 *global = true;
4566                                 break;
4567                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4568                                 if (print)
4569                                         _print_next_block(par_num++,
4570                                                           "MCP UMP RX");
4571                                 *global = true;
4572                                 break;
4573                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4574                                 if (print)
4575                                         _print_next_block(par_num++,
4576                                                           "MCP UMP TX");
4577                                 *global = true;
4578                                 break;
4579                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4580                                 if (print)
4581                                         _print_next_block(par_num++,
4582                                                           "MCP SCPAD");
4583                                 *global = true;
4584                                 break;
4585                         }
4586
4587                         /* Clear the bit */
4588                         sig &= ~cur_bit;
4589                 }
4590         }
4591
4592         return par_num;
4593 }
4594
4595 static int bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4596                                             int par_num, bool print)
4597 {
4598         int i = 0;
4599         u32 cur_bit = 0;
4600         for (i = 0; sig; i++) {
4601                 cur_bit = ((u32)0x1 << i);
4602                 if (sig & cur_bit) {
4603                         switch (cur_bit) {
4604                         case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4605                                 if (print) {
4606                                         _print_next_block(par_num++, "PGLUE_B");
4607                                         _print_parity(bp,
4608                                                 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4609                                 }
4610                                 break;
4611                         case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4612                                 if (print) {
4613                                         _print_next_block(par_num++, "ATC");
4614                                         _print_parity(bp,
4615                                                       ATC_REG_ATC_PRTY_STS);
4616                                 }
4617                                 break;
4618                         }
4619
4620                         /* Clear the bit */
4621                         sig &= ~cur_bit;
4622                 }
4623         }
4624
4625         return par_num;
4626 }
4627
4628 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4629                               u32 *sig)
4630 {
4631         if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4632             (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4633             (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4634             (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4635             (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4636                 int par_num = 0;
4637                 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4638                                  "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4639                           sig[0] & HW_PRTY_ASSERT_SET_0,
4640                           sig[1] & HW_PRTY_ASSERT_SET_1,
4641                           sig[2] & HW_PRTY_ASSERT_SET_2,
4642                           sig[3] & HW_PRTY_ASSERT_SET_3,
4643                           sig[4] & HW_PRTY_ASSERT_SET_4);
4644                 if (print)
4645                         netdev_err(bp->dev,
4646                                    "Parity errors detected in blocks: ");
4647                 par_num = bnx2x_check_blocks_with_parity0(bp,
4648                         sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
4649                 par_num = bnx2x_check_blocks_with_parity1(bp,
4650                         sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
4651                 par_num = bnx2x_check_blocks_with_parity2(bp,
4652                         sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
4653                 par_num = bnx2x_check_blocks_with_parity3(
4654                         sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4655                 par_num = bnx2x_check_blocks_with_parity4(bp,
4656                         sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4657
4658                 if (print)
4659                         pr_cont("\n");
4660
4661                 return true;
4662         } else
4663                 return false;
4664 }
4665
4666 /**
4667  * bnx2x_chk_parity_attn - checks for parity attentions.
4668  *
4669  * @bp:         driver handle
4670  * @global:     true if there was a global attention
4671  * @print:      show parity attention in syslog
4672  */
4673 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4674 {
4675         struct attn_route attn = { {0} };
4676         int port = BP_PORT(bp);
4677
4678         attn.sig[0] = REG_RD(bp,
4679                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4680                              port*4);
4681         attn.sig[1] = REG_RD(bp,
4682                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4683                              port*4);
4684         attn.sig[2] = REG_RD(bp,
4685                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4686                              port*4);
4687         attn.sig[3] = REG_RD(bp,
4688                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4689                              port*4);
4690
4691         if (!CHIP_IS_E1x(bp))
4692                 attn.sig[4] = REG_RD(bp,
4693                         MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4694                                      port*4);
4695
4696         return bnx2x_parity_attn(bp, global, print, attn.sig);
4697 }
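/* Editorial note (not part of the driver): a caller typically invokes this as
 *
 *      bool global = false;
 *      ...
 *      if (bnx2x_chk_parity_attn(bp, &global, true))
 *              <start recovery>
 *
 * where "global" ends up true for the MCP/VAUX-class parities; this is the
 * pattern used by bnx2x_attn_int_deasserted() further below.
 */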
4698
4699 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4700 {
4701         u32 val;
4702         if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4703
4704                 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4705                 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4706                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
4707                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
4708                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
4709                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
4710                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
4711                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
4712                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
4713                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
4714                 if (val &
4715                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
4716                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
4717                 if (val &
4718                     PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
4719                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
4720                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
4721                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
4722                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
4723                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
4724                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
4725                         BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
4726         }
4727         if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4728                 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4729                 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4730                 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4731                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4732                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
4733                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
4734                 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
4735                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
4736                 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
4737                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
4738                 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4739                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4740                 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
4741                         BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
4742         }
4743
4744         if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4745                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4746                 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4747                 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4748                     AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4749         }
4750 }
4751
4752 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4753 {
4754         struct attn_route attn, *group_mask;
4755         int port = BP_PORT(bp);
4756         int index;
4757         u32 reg_addr;
4758         u32 val;
4759         u32 aeu_mask;
4760         bool global = false;
4761
4762         /* need to take HW lock because MCP or other port might also
4763            try to handle this event */
4764         bnx2x_acquire_alr(bp);
4765
4766         if (bnx2x_chk_parity_attn(bp, &global, true)) {
4767 #ifndef BNX2X_STOP_ON_ERROR
4768                 bp->recovery_state = BNX2X_RECOVERY_INIT;
4769                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4770                 /* Disable HW interrupts */
4771                 bnx2x_int_disable(bp);
4772                 /* In case of parity errors don't handle attentions so that
4773                  * the other function would also "see" the parity errors.
4774                  */
4775 #else
4776                 bnx2x_panic();
4777 #endif
4778                 bnx2x_release_alr(bp);
4779                 return;
4780         }
4781
4782         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4783         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4784         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4785         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4786         if (!CHIP_IS_E1x(bp))
4787                 attn.sig[4] =
4788                       REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4789         else
4790                 attn.sig[4] = 0;
4791
4792         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4793            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
4794
4795         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4796                 if (deasserted & (1 << index)) {
4797                         group_mask = &bp->attn_group[index];
4798
4799                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
4800                            index,
4801                            group_mask->sig[0], group_mask->sig[1],
4802                            group_mask->sig[2], group_mask->sig[3],
4803                            group_mask->sig[4]);
4804
4805                         bnx2x_attn_int_deasserted4(bp,
4806                                         attn.sig[4] & group_mask->sig[4]);
4807                         bnx2x_attn_int_deasserted3(bp,
4808                                         attn.sig[3] & group_mask->sig[3]);
4809                         bnx2x_attn_int_deasserted1(bp,
4810                                         attn.sig[1] & group_mask->sig[1]);
4811                         bnx2x_attn_int_deasserted2(bp,
4812                                         attn.sig[2] & group_mask->sig[2]);
4813                         bnx2x_attn_int_deasserted0(bp,
4814                                         attn.sig[0] & group_mask->sig[0]);
4815                 }
4816         }
4817
4818         bnx2x_release_alr(bp);
4819
4820         if (bp->common.int_block == INT_BLOCK_HC)
4821                 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4822                             COMMAND_REG_ATTN_BITS_CLR);
4823         else
4824                 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
4825
4826         val = ~deasserted;
4827         DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4828            (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4829         REG_WR(bp, reg_addr, val);
4830
4831         if (~bp->attn_state & deasserted)
4832                 BNX2X_ERR("IGU ERROR\n");
4833
4834         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4835                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
4836
4837         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4838         aeu_mask = REG_RD(bp, reg_addr);
4839
4840         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
4841            aeu_mask, deasserted);
4842         aeu_mask |= (deasserted & 0x3ff);
4843         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4844
4845         REG_WR(bp, reg_addr, aeu_mask);
4846         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4847
4848         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4849         bp->attn_state &= ~deasserted;
4850         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4851 }
4852
4853 static void bnx2x_attn_int(struct bnx2x *bp)
4854 {
4855         /* read local copy of bits */
4856         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4857                                                                 attn_bits);
4858         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4859                                                                 attn_bits_ack);
4860         u32 attn_state = bp->attn_state;
4861
4862         /* look for changed bits */
4863         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
4864         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
4865
4866         DP(NETIF_MSG_HW,
4867            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
4868            attn_bits, attn_ack, asserted, deasserted);
4869
4870         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4871                 BNX2X_ERR("BAD attention state\n");
4872
4873         /* handle bits that were raised */
4874         if (asserted)
4875                 bnx2x_attn_int_asserted(bp, asserted);
4876
4877         if (deasserted)
4878                 bnx2x_attn_int_deasserted(bp, deasserted);
4879 }
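/* Editorial sketch (not part of the driver): the asserted/deasserted masks
 * above follow directly from the three bitmaps.  For a single bit:
 *
 *      bits  ack  state  ->  asserted  deasserted
 *       1     0     0            1          0      (newly raised, not yet acked)
 *       0     1     1            0          1      (dropped after being handled)
 *
 * Any other combination leaves both masks clear for that bit, and the
 * "BAD attention state" check fires when bits == ack while bits != state.
 */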
4880
4881 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4882                       u16 index, u8 op, u8 update)
4883 {
4884         u32 igu_addr = bp->igu_base_addr;
4885         igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
4886         bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4887                              igu_addr);
4888 }
4889
4890 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4891 {
4892         /* No memory barriers */
4893         storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4894         mmiowb(); /* keep prod updates ordered */
4895 }
4896
4897 static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4898                                       union event_ring_elem *elem)
4899 {
4900         u8 err = elem->message.error;
4901
4902         if (!bp->cnic_eth_dev.starting_cid  ||
4903             (cid < bp->cnic_eth_dev.starting_cid &&
4904             cid != bp->cnic_eth_dev.iscsi_l2_cid))
4905                 return 1;
4906
4907         DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4908
4909         if (unlikely(err)) {
4910
4911                 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4912                           cid);
4913                 bnx2x_panic_dump(bp, false);
4914         }
4915         bnx2x_cnic_cfc_comp(bp, cid, err);
4916         return 0;
4917 }
4918
4919 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4920 {
4921         struct bnx2x_mcast_ramrod_params rparam;
4922         int rc;
4923
4924         memset(&rparam, 0, sizeof(rparam));
4925
4926         rparam.mcast_obj = &bp->mcast_obj;
4927
4928         netif_addr_lock_bh(bp->dev);
4929
4930         /* Clear pending state for the last command */
4931         bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4932
4933         /* If there are pending mcast commands - send them */
4934         if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4935                 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4936                 if (rc < 0)
4937                         BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4938                                   rc);
4939         }
4940
4941         netif_addr_unlock_bh(bp->dev);
4942 }
4943
4944 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4945                                             union event_ring_elem *elem)
4946 {
4947         unsigned long ramrod_flags = 0;
4948         int rc = 0;
4949         u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4950         struct bnx2x_vlan_mac_obj *vlan_mac_obj;
4951
4952         /* Always push next commands out, don't wait here */
4953         __set_bit(RAMROD_CONT, &ramrod_flags);
4954
4955         switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
4956                             >> BNX2X_SWCID_SHIFT) {
4957         case BNX2X_FILTER_MAC_PENDING:
4958                 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
4959                 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
4960                         vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4961                 else
4962                         vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
4963
4964                 break;
4965         case BNX2X_FILTER_MCAST_PENDING:
4966                 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
4967                 /* This is only relevant for 57710 where multicast MACs are
4968                  * configured as unicast MACs using the same ramrod.
4969                  */
4970                 bnx2x_handle_mcast_eqe(bp);
4971                 return;
4972         default:
4973                 BNX2X_ERR("Unsupported classification command: %d\n",
4974                           elem->message.data.eth_event.echo);
4975                 return;
4976         }
4977
4978         rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4979
4980         if (rc < 0)
4981                 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4982         else if (rc > 0)
4983                 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4984 }
4985
4986 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4987
4988 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4989 {
4990         netif_addr_lock_bh(bp->dev);
4991
4992         clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
4993
4994         /* Send rx_mode command again if it was requested */
4995         if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4996                 bnx2x_set_storm_rx_mode(bp);
4997         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4998                                     &bp->sp_state))
4999                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5000         else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5001                                     &bp->sp_state))
5002                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5003
5004         netif_addr_unlock_bh(bp->dev);
5005 }
5006
5007 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5008                                               union event_ring_elem *elem)
5009 {
5010         if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5011                 DP(BNX2X_MSG_SP,
5012                    "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5013                    elem->message.data.vif_list_event.func_bit_map);
5014                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5015                         elem->message.data.vif_list_event.func_bit_map);
5016         } else if (elem->message.data.vif_list_event.echo ==
5017                    VIF_LIST_RULE_SET) {
5018                 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5019                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5020         }
5021 }
5022
5023 /* called with rtnl_lock */
5024 static void bnx2x_after_function_update(struct bnx2x *bp)
5025 {
5026         int q, rc;
5027         struct bnx2x_fastpath *fp;
5028         struct bnx2x_queue_state_params queue_params = {NULL};
5029         struct bnx2x_queue_update_params *q_update_params =
5030                 &queue_params.params.update;
5031
5032         /* Send Q update command with afex vlan removal values for all Qs */
5033         queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5034
5035         /* set silent vlan removal values according to vlan mode */
5036         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5037                   &q_update_params->update_flags);
5038         __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5039                   &q_update_params->update_flags);
5040         __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5041
5042         /* in access mode, mark mask and value as 0 to strip all vlans */
5043         if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5044                 q_update_params->silent_removal_value = 0;
5045                 q_update_params->silent_removal_mask = 0;
5046         } else {
5047                 q_update_params->silent_removal_value =
5048                         (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5049                 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5050         }
5051
5052         for_each_eth_queue(bp, q) {
5053                 /* Set the appropriate Queue object */
5054                 fp = &bp->fp[q];
5055                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5056
5057                 /* send the ramrod */
5058                 rc = bnx2x_queue_state_change(bp, &queue_params);
5059                 if (rc < 0)
5060                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5061                                   q);
5062         }
5063
5064         if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5065                 fp = &bp->fp[FCOE_IDX(bp)];
5066                 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5067
5068                 /* clear pending completion bit */
5069                 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5070
5071                 /* mark latest Q bit */
5072                 smp_mb__before_clear_bit();
5073                 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5074                 smp_mb__after_clear_bit();
5075
5076                 /* send Q update ramrod for FCoE Q */
5077                 rc = bnx2x_queue_state_change(bp, &queue_params);
5078                 if (rc < 0)
5079                         BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5080                                   q);
5081         } else {
5082                 /* If no FCoE ring - ACK MCP now */
5083                 bnx2x_link_report(bp);
5084                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5085         }
5086 }
5087
5088 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5089         struct bnx2x *bp, u32 cid)
5090 {
5091         DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5092
5093         if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5094                 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5095         else
5096                 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5097 }
5098
5099 static void bnx2x_eq_int(struct bnx2x *bp)
5100 {
5101         u16 hw_cons, sw_cons, sw_prod;
5102         union event_ring_elem *elem;
5103         u8 echo;
5104         u32 cid;
5105         u8 opcode;
5106         int rc, spqe_cnt = 0;
5107         struct bnx2x_queue_sp_obj *q_obj;
5108         struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5109         struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5110
5111         hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5112
5113         /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5114          * When we get the next-page element we need to adjust so the loop
5115          * condition below will be met. The next-page element is the size of a
5116          * regular element and hence we increment by 1.
5117          */
5118         if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5119                 hw_cons++;
5120
5121         /* This function never runs in parallel with itself for a
5122          * specific bp, thus there is no need for a "paired" read memory
5123          * barrier here.
5124          */
5125         sw_cons = bp->eq_cons;
5126         sw_prod = bp->eq_prod;
5127
5128         DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
5129                         hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5130
5131         for (; sw_cons != hw_cons;
5132               sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5133
5134                 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5135
5136                 rc = bnx2x_iov_eq_sp_event(bp, elem);
5137                 if (!rc) {
5138                         DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5139                            rc);
5140                         goto next_spqe;
5141                 }
5142
5143                 /* elem CID originates from FW; actually LE */
5144                 cid = SW_CID((__force __le32)
5145                              elem->message.data.cfc_del_event.cid);
5146                 opcode = elem->message.opcode;
5147
5148                 /* handle eq element */
5149                 switch (opcode) {
5150                 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5151                         DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
5152                         bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
5153                         continue;
5154
5155                 case EVENT_RING_OPCODE_STAT_QUERY:
5156                         DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
5157                            "got statistics comp event %d\n",
5158                            bp->stats_comp++);
5159                         /* nothing to do with stats comp */
5160                         goto next_spqe;
5161
5162                 case EVENT_RING_OPCODE_CFC_DEL:
5163                         /* handle according to cid range */
5164                         /*
5165                          * we may want to verify here that the bp state is
5166                          * HALTING
5167                          */
5168                         DP(BNX2X_MSG_SP,
5169                            "got delete ramrod for MULTI[%d]\n", cid);
5170
5171                         if (CNIC_LOADED(bp) &&
5172                             !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5173                                 goto next_spqe;
5174
5175                         q_obj = bnx2x_cid_to_q_obj(bp, cid);
5176
5177                         if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5178                                 break;
5179
5180                         goto next_spqe;
5181
5182                 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5183                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5184                         if (f_obj->complete_cmd(bp, f_obj,
5185                                                 BNX2X_F_CMD_TX_STOP))
5186                                 break;
5187                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5188                         goto next_spqe;
5189
5190                 case EVENT_RING_OPCODE_START_TRAFFIC:
5191                         DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5192                         if (f_obj->complete_cmd(bp, f_obj,
5193                                                 BNX2X_F_CMD_TX_START))
5194                                 break;
5195                         bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5196                         goto next_spqe;
5197
5198                 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5199                         echo = elem->message.data.function_update_event.echo;
5200                         if (echo == SWITCH_UPDATE) {
5201                                 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5202                                    "got FUNC_SWITCH_UPDATE ramrod\n");
5203                                 if (f_obj->complete_cmd(
5204                                         bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5205                                         break;
5206
5207                         } else {
5208                                 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5209                                    "AFEX: ramrod completed FUNCTION_UPDATE\n");
5210                                 f_obj->complete_cmd(bp, f_obj,
5211                                                     BNX2X_F_CMD_AFEX_UPDATE);
5212
5213                                 /* We will perform the Queues update from
5214                                  * sp_rtnl task as all Queue SP operations
5215                                  * should run under rtnl_lock.
5216                                  */
5217                                 smp_mb__before_clear_bit();
5218                                 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5219                                         &bp->sp_rtnl_state);
5220                                 smp_mb__after_clear_bit();
5221
5222                                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5223                         }
5224
5225                         goto next_spqe;
5226
5227                 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5228                         f_obj->complete_cmd(bp, f_obj,
5229                                             BNX2X_F_CMD_AFEX_VIFLISTS);
5230                         bnx2x_after_afex_vif_lists(bp, elem);
5231                         goto next_spqe;
5232                 case EVENT_RING_OPCODE_FUNCTION_START:
5233                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5234                            "got FUNC_START ramrod\n");
5235                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5236                                 break;
5237
5238                         goto next_spqe;
5239
5240                 case EVENT_RING_OPCODE_FUNCTION_STOP:
5241                         DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5242                            "got FUNC_STOP ramrod\n");
5243                         if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5244                                 break;
5245
5246                         goto next_spqe;
5247                 }
5248
5249                 switch (opcode | bp->state) {
5250                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5251                       BNX2X_STATE_OPEN):
5252                 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5253                       BNX2X_STATE_OPENING_WAIT4_PORT):
5254                         cid = elem->message.data.eth_event.echo &
5255                                 BNX2X_SWCID_MASK;
5256                         DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5257                            cid);
5258                         rss_raw->clear_pending(rss_raw);
5259                         break;
5260
5261                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5262                 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5263                 case (EVENT_RING_OPCODE_SET_MAC |
5264                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5265                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5266                       BNX2X_STATE_OPEN):
5267                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5268                       BNX2X_STATE_DIAG):
5269                 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5270                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5271                         DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
5272                         bnx2x_handle_classification_eqe(bp, elem);
5273                         break;
5274
5275                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5276                       BNX2X_STATE_OPEN):
5277                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5278                       BNX2X_STATE_DIAG):
5279                 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5280                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5281                         DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5282                         bnx2x_handle_mcast_eqe(bp);
5283                         break;
5284
5285                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5286                       BNX2X_STATE_OPEN):
5287                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5288                       BNX2X_STATE_DIAG):
5289                 case (EVENT_RING_OPCODE_FILTERS_RULES |
5290                       BNX2X_STATE_CLOSING_WAIT4_HALT):
5291                         DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5292                         bnx2x_handle_rx_mode_eqe(bp);
5293                         break;
5294                 default:
5295                         /* unknown event: log an error and continue */
5296                         BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5297                                   elem->message.opcode, bp->state);
5298                 }
5299 next_spqe:
5300                 spqe_cnt++;
5301         } /* for */
5302
5303         smp_mb__before_atomic_inc();
5304         atomic_add(spqe_cnt, &bp->eq_spq_left);
5305
5306         bp->eq_cons = sw_cons;
5307         bp->eq_prod = sw_prod;
5308         /* Make sure the above memory writes are ordered before the producer update */
5309         smp_wmb();
5310
5311         /* update producer */
5312         bnx2x_update_eq_prod(bp, bp->eq_prod);
5313 }
5314
5315 static void bnx2x_sp_task(struct work_struct *work)
5316 {
5317         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5318
5319         DP(BNX2X_MSG_SP, "sp task invoked\n");
5320
5321         /* make sure the atomic interrupt_occurred has been written */
5322         smp_rmb();
5323         if (atomic_read(&bp->interrupt_occurred)) {
5324
5325                 /* what work needs to be performed? */
5326                 u16 status = bnx2x_update_dsb_idx(bp);
5327
5328                 DP(BNX2X_MSG_SP, "status %x\n", status);
5329                 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5330                 atomic_set(&bp->interrupt_occurred, 0);
5331
5332                 /* HW attentions */
5333                 if (status & BNX2X_DEF_SB_ATT_IDX) {
5334                         bnx2x_attn_int(bp);
5335                         status &= ~BNX2X_DEF_SB_ATT_IDX;
5336                 }
5337
5338                 /* SP events: STAT_QUERY and others */
5339                 if (status & BNX2X_DEF_SB_IDX) {
5340                         struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5341
5342                         if (FCOE_INIT(bp) &&
5343                             (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5344                                 /* Prevent local bottom-halves from running as
5345                                  * we are going to change the local NAPI list.
5346                                  */
5347                                 local_bh_disable();
5348                                 napi_schedule(&bnx2x_fcoe(bp, napi));
5349                                 local_bh_enable();
5350                         }
5351
5352                         /* Handle EQ completions */
5353                         bnx2x_eq_int(bp);
5354                         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5355                                      le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5356
5357                         status &= ~BNX2X_DEF_SB_IDX;
5358                 }
5359
5360                 /* if status is non zero then perhaps something went wrong */
5361                 if (unlikely(status))
5362                         DP(BNX2X_MSG_SP,
5363                            "got an unknown interrupt! (status 0x%x)\n", status);
5364
5365                 /* ack status block only if something was actually handled */
5366                 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5367                              le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5368         }
5369
5370         /* must be called after the EQ processing (since eq leads to sriov
5371          * ramrod completion flows).
5372          * This flow may have been scheduled by the arrival of a ramrod
5373          * completion, or by the sriov code rescheduling itself.
5374          */
5375         bnx2x_iov_sp_task(bp);
5376
5377         /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5378         if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5379                                &bp->sp_state)) {
5380                 bnx2x_link_report(bp);
5381                 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5382         }
5383 }
5384
5385 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5386 {
5387         struct net_device *dev = dev_instance;
5388         struct bnx2x *bp = netdev_priv(dev);
5389
5390         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5391                      IGU_INT_DISABLE, 0);
5392
5393 #ifdef BNX2X_STOP_ON_ERROR
5394         if (unlikely(bp->panic))
5395                 return IRQ_HANDLED;
5396 #endif
5397
5398         if (CNIC_LOADED(bp)) {
5399                 struct cnic_ops *c_ops;
5400
5401                 rcu_read_lock();
5402                 c_ops = rcu_dereference(bp->cnic_ops);
5403                 if (c_ops)
5404                         c_ops->cnic_handler(bp->cnic_data, NULL);
5405                 rcu_read_unlock();
5406         }
5407
5408         /* schedule sp task to perform default status block work, ack
5409          * attentions and enable interrupts.
5410          */
5411         bnx2x_schedule_sp_task(bp);
5412
5413         return IRQ_HANDLED;
5414 }
5415
5416 /* end of slow path */
5417
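     /* Write the driver's heartbeat sequence number into this function's
      * mailbox in shared memory; bnx2x_timer() below cross-checks it against
      * the MCP's pulse to detect a lost heartbeat.
      */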
5418 void bnx2x_drv_pulse(struct bnx2x *bp)
5419 {
5420         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5421                  bp->fw_drv_pulse_wr_seq);
5422 }
5423
5424 static void bnx2x_timer(unsigned long data)
5425 {
5426         struct bnx2x *bp = (struct bnx2x *) data;
5427
5428         if (!netif_running(bp->dev))
5429                 return;
5430
5431         if (IS_PF(bp) &&
5432             !BP_NOMCP(bp)) {
5433                 int mb_idx = BP_FW_MB_IDX(bp);
5434                 u32 drv_pulse;
5435                 u32 mcp_pulse;
5436
5437                 ++bp->fw_drv_pulse_wr_seq;
5438                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5439                 /* TBD - add SYSTEM_TIME */
5440                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5441                 bnx2x_drv_pulse(bp);
5442
5443                 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5444                              MCP_PULSE_SEQ_MASK);
5445                 /* The delta between driver pulse and mcp response
5446                  * should be 1 (before mcp response) or 0 (after mcp response)
5447                  */
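                     /* e.g. drv_pulse 0x12 is healthy when mcp_pulse reads
                      * 0x12 (MCP already responded) or 0x11 (response still
                      * pending); anything else is a missed heartbeat.
                      */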
5448                 if ((drv_pulse != mcp_pulse) &&
5449                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5450                         /* someone lost a heartbeat... */
5451                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5452                                   drv_pulse, mcp_pulse);
5453                 }
5454         }
5455
5456         if (bp->state == BNX2X_STATE_OPEN)
5457                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5458
5459         /* sample pf vf bulletin board for new posts from pf */
5460         if (IS_VF(bp)) {
5461                 bnx2x_sample_bulletin(bp);
5462
5463                 /* if the channel is down we need to self-destruct */
5464                 if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
5465                         smp_mb__before_clear_bit();
5466                         set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
5467                                 &bp->sp_rtnl_state);
5468                         smp_mb__after_clear_bit();
5469                         schedule_delayed_work(&bp->sp_rtnl_task, 0);
5470                 }
5471         }
5472
5473         mod_timer(&bp->timer, jiffies + bp->current_interval);
5474 }
5475
5476 /* end of Statistics */
5477
5478 /* nic init */
5479
5480 /*
5481  * nic init service functions
5482  */
5483
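     /* Fill 'len' bytes of chip memory at 'addr' with 'fill', using dword
      * writes when both the address and length are dword aligned and byte
      * writes otherwise; callers below use it to zero a region, e.g.
      * bnx2x_fill(bp, addr, 0, size).
      */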
5484 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5485 {
5486         u32 i;
5487         if (!(len%4) && !(addr%4))
5488                 for (i = 0; i < len; i += 4)
5489                         REG_WR(bp, addr + i, fill);
5490         else
5491                 for (i = 0; i < len; i++)
5492                         REG_WR8(bp, addr + i, fill);
5493 }
5494
5495 /* helper: writes FP SB data to FW - data_size in dwords */
5496 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5497                                 int fw_sb_id,
5498                                 u32 *sb_data_p,
5499                                 u32 data_size)
5500 {
5501         int index;
5502         for (index = 0; index < data_size; index++)
5503                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5504                         CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5505                         sizeof(u32)*index,
5506                         *(sb_data_p + index));
5507 }
5508
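     /* Mark a fastpath status block as disabled in the FW and zero its
      * status and sync areas in CSTORM internal memory.
      */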
5509 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5510 {
5511         u32 *sb_data_p;
5512         u32 data_size = 0;
5513         struct hc_status_block_data_e2 sb_data_e2;
5514         struct hc_status_block_data_e1x sb_data_e1x;
5515
5516         /* disable the function first */
5517         if (!CHIP_IS_E1x(bp)) {
5518                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5519                 sb_data_e2.common.state = SB_DISABLED;
5520                 sb_data_e2.common.p_func.vf_valid = false;
5521                 sb_data_p = (u32 *)&sb_data_e2;
5522                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5523         } else {
5524                 memset(&sb_data_e1x, 0,
5525                        sizeof(struct hc_status_block_data_e1x));
5526                 sb_data_e1x.common.state = SB_DISABLED;
5527                 sb_data_e1x.common.p_func.vf_valid = false;
5528                 sb_data_p = (u32 *)&sb_data_e1x;
5529                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5530         }
5531         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5532
5533         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5534                         CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5535                         CSTORM_STATUS_BLOCK_SIZE);
5536         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5537                         CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5538                         CSTORM_SYNC_BLOCK_SIZE);
5539 }
5540
5541 /* helper:  writes SP SB data to FW */
5542 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5543                 struct hc_sp_status_block_data *sp_sb_data)
5544 {
5545         int func = BP_FUNC(bp);
5546         int i;
5547         for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5548                 REG_WR(bp, BAR_CSTRORM_INTMEM +
5549                         CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5550                         i*sizeof(u32),
5551                         *((u32 *)sp_sb_data + i));
5552 }
5553
5554 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5555 {
5556         int func = BP_FUNC(bp);
5557         struct hc_sp_status_block_data sp_sb_data;
5558         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5559
5560         sp_sb_data.state = SB_DISABLED;
5561         sp_sb_data.p_func.vf_valid = false;
5562
5563         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5564
5565         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5566                         CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5567                         CSTORM_SP_STATUS_BLOCK_SIZE);
5568         bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5569                         CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5570                         CSTORM_SP_SYNC_BLOCK_SIZE);
5571 }
5572
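     /* Bind a non-default SB state machine to its IGU SB and segment and
      * initialize its timer fields to their maximum values.
      */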
5573 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5574                                            int igu_sb_id, int igu_seg_id)
5575 {
5576         hc_sm->igu_sb_id = igu_sb_id;
5577         hc_sm->igu_seg_id = igu_seg_id;
5578         hc_sm->timer_value = 0xFF;
5579         hc_sm->time_to_expire = 0xFFFFFFFF;
5580 }
5581
5582 /* assigns each status block index to its RX or TX state machine. */
5583 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5584 {
5585         /* zero out state machine indices */
5586         /* rx indices */
5587         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5588
5589         /* tx indices */
5590         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5591         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5592         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5593         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5594
5595         /* map indices */
5596         /* rx indices */
5597         index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5598                 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5599
5600         /* tx indices */
5601         index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5602                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5603         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5604                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5605         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5606                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5607         index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5608                 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5609 }
5610
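     /* Initialize a fastpath status block: clear any stale FW state, fill in
      * the host address and PF/VF identity, map the RX/TX state machines and
      * write the result to CSTORM internal memory.
      */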
5611 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5612                           u8 vf_valid, int fw_sb_id, int igu_sb_id)
5613 {
5614         int igu_seg_id;
5615
5616         struct hc_status_block_data_e2 sb_data_e2;
5617         struct hc_status_block_data_e1x sb_data_e1x;
5618         struct hc_status_block_sm  *hc_sm_p;
5619         int data_size;
5620         u32 *sb_data_p;
5621
5622         if (CHIP_INT_MODE_IS_BC(bp))
5623                 igu_seg_id = HC_SEG_ACCESS_NORM;
5624         else
5625                 igu_seg_id = IGU_SEG_ACCESS_NORM;
5626
5627         bnx2x_zero_fp_sb(bp, fw_sb_id);
5628
5629         if (!CHIP_IS_E1x(bp)) {
5630                 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5631                 sb_data_e2.common.state = SB_ENABLED;
5632                 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5633                 sb_data_e2.common.p_func.vf_id = vfid;
5634                 sb_data_e2.common.p_func.vf_valid = vf_valid;
5635                 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5636                 sb_data_e2.common.same_igu_sb_1b = true;
5637                 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5638                 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5639                 hc_sm_p = sb_data_e2.common.state_machine;
5640                 sb_data_p = (u32 *)&sb_data_e2;
5641                 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5642                 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5643         } else {
5644                 memset(&sb_data_e1x, 0,
5645                        sizeof(struct hc_status_block_data_e1x));
5646                 sb_data_e1x.common.state = SB_ENABLED;
5647                 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5648                 sb_data_e1x.common.p_func.vf_id = 0xff;
5649                 sb_data_e1x.common.p_func.vf_valid = false;
5650                 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5651                 sb_data_e1x.common.same_igu_sb_1b = true;
5652                 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5653                 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5654                 hc_sm_p = sb_data_e1x.common.state_machine;
5655                 sb_data_p = (u32 *)&sb_data_e1x;
5656                 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5657                 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
5658         }
5659
5660         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5661                                        igu_sb_id, igu_seg_id);
5662         bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5663                                        igu_sb_id, igu_seg_id);
5664
5665         DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
5666
5667         /* write indices to HW - PCI guarantees endianity of regpairs */
5668         bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5669 }
5670
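     /* Program the RX and per-COS TX interrupt coalescing timeouts (in usec)
      * for the given FW status block.
      */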
5671 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
5672                                      u16 tx_usec, u16 rx_usec)
5673 {
5674         bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
5675                                     false, rx_usec);
5676         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5677                                        HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5678                                        tx_usec);
5679         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5680                                        HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5681                                        tx_usec);
5682         bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5683                                        HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5684                                        tx_usec);
5685 }
5686
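     /* Initialize the default (slow path) status block: attention groups and
      * attention message addresses, then the SP SB data in CSTORM.
      */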
5687 static void bnx2x_init_def_sb(struct bnx2x *bp)
5688 {
5689         struct host_sp_status_block *def_sb = bp->def_status_blk;
5690         dma_addr_t mapping = bp->def_status_blk_mapping;
5691         int igu_sp_sb_index;
5692         int igu_seg_id;
5693         int port = BP_PORT(bp);
5694         int func = BP_FUNC(bp);
5695         int reg_offset, reg_offset_en5;
5696         u64 section;
5697         int index;
5698         struct hc_sp_status_block_data sp_sb_data;
5699         memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5700
5701         if (CHIP_INT_MODE_IS_BC(bp)) {
5702                 igu_sp_sb_index = DEF_SB_IGU_ID;
5703                 igu_seg_id = HC_SEG_ACCESS_DEF;
5704         } else {
5705                 igu_sp_sb_index = bp->igu_dsb_id;
5706                 igu_seg_id = IGU_SEG_ACCESS_DEF;
5707         }
5708
5709         /* ATTN */
5710         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5711                                             atten_status_block);
5712         def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
5713
5714         bp->attn_state = 0;
5715
5716         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5717                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5718         reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5719                                  MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
5720         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5721                 int sindex;
5722                 /* take care of sig[0]..sig[4] */
5723                 for (sindex = 0; sindex < 4; sindex++)
5724                         bp->attn_group[index].sig[sindex] =
5725                            REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
5726
5727                 if (!CHIP_IS_E1x(bp))
5728                         /*
5729                          * enable5 is separate from the rest of the registers,
5730                          * and therefore the address skip is 4
5731                          * and not 16 between the different groups
5732                          */
5733                         bp->attn_group[index].sig[4] = REG_RD(bp,
5734                                         reg_offset_en5 + 0x4*index);
5735                 else
5736                         bp->attn_group[index].sig[4] = 0;
5737         }
5738
5739         if (bp->common.int_block == INT_BLOCK_HC) {
5740                 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5741                                      HC_REG_ATTN_MSG0_ADDR_L);
5742
5743                 REG_WR(bp, reg_offset, U64_LO(section));
5744                 REG_WR(bp, reg_offset + 4, U64_HI(section));
5745         } else if (!CHIP_IS_E1x(bp)) {
5746                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5747                 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5748         }
5749
5750         section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5751                                             sp_sb);
5752
5753         bnx2x_zero_sp_sb(bp);
5754
5755         /* PCI guarantees endianity of regpairs */
5756         sp_sb_data.state                = SB_ENABLED;
5757         sp_sb_data.host_sb_addr.lo      = U64_LO(section);
5758         sp_sb_data.host_sb_addr.hi      = U64_HI(section);
5759         sp_sb_data.igu_sb_id            = igu_sp_sb_index;
5760         sp_sb_data.igu_seg_id           = igu_seg_id;
5761         sp_sb_data.p_func.pf_id         = func;
5762         sp_sb_data.p_func.vnic_id       = BP_VN(bp);
5763         sp_sb_data.p_func.vf_id         = 0xff;
5764
5765         bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5766
5767         bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
5768 }
5769
5770 void bnx2x_update_coalesce(struct bnx2x *bp)
5771 {
5772         int i;
5773
5774         for_each_eth_queue(bp, i)
5775                 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
5776                                          bp->tx_ticks, bp->rx_ticks);
5777 }
5778
5779 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5780 {
5781         spin_lock_init(&bp->spq_lock);
5782         atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
5783
5784         bp->spq_prod_idx = 0;
5785         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5786         bp->spq_prod_bd = bp->spq;
5787         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5788 }
5789
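     /* Chain the event queue pages into a ring by pointing the last element
      * of each page at the next page, then reset the EQ producer/consumer
      * indices and the available-credits counter.
      */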
5790 static void bnx2x_init_eq_ring(struct bnx2x *bp)
5791 {
5792         int i;
5793         for (i = 1; i <= NUM_EQ_PAGES; i++) {
5794                 union event_ring_elem *elem =
5795                         &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
5796
5797                 elem->next_page.addr.hi =
5798                         cpu_to_le32(U64_HI(bp->eq_mapping +
5799                                    BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5800                 elem->next_page.addr.lo =
5801                         cpu_to_le32(U64_LO(bp->eq_mapping +
5802                                    BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
5803         }
5804         bp->eq_cons = 0;
5805         bp->eq_prod = NUM_EQ_DESC;
5806         bp->eq_cons_sb = BNX2X_EQ_INDEX;
5807         /* init one below capacity so we get a warning message before the SPQ credits are actually exhausted */
5808         atomic_set(&bp->eq_spq_left,
5809                 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
5810 }
5811
5812 /* called with netif_addr_lock_bh() */
5813 int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5814                         unsigned long rx_mode_flags,
5815                         unsigned long rx_accept_flags,
5816                         unsigned long tx_accept_flags,
5817                         unsigned long ramrod_flags)
5818 {
5819         struct bnx2x_rx_mode_ramrod_params ramrod_param;
5820         int rc;
5821
5822         memset(&ramrod_param, 0, sizeof(ramrod_param));
5823
5824         /* Prepare ramrod parameters */
5825         ramrod_param.cid = 0;
5826         ramrod_param.cl_id = cl_id;
5827         ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5828         ramrod_param.func_id = BP_FUNC(bp);
5829
5830         ramrod_param.pstate = &bp->sp_state;
5831         ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
5832
5833         ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5834         ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5835
5836         set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5837
5838         ramrod_param.ramrod_flags = ramrod_flags;
5839         ramrod_param.rx_mode_flags = rx_mode_flags;
5840
5841         ramrod_param.rx_accept_flags = rx_accept_flags;
5842         ramrod_param.tx_accept_flags = tx_accept_flags;
5843
5844         rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5845         if (rc < 0) {
5846                 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
5847                 return rc;
5848         }
5849
5850         return 0;
5851 }
5852
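     /* Translate the driver rx_mode (none/normal/allmulti/promisc) into RX
      * and TX (internal switching) accept-flag bitmaps.
      */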
5853 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
5854                                    unsigned long *rx_accept_flags,
5855                                    unsigned long *tx_accept_flags)
5856 {
5857         /* Clear the flags first */
5858         *rx_accept_flags = 0;
5859         *tx_accept_flags = 0;
5860
5861         switch (rx_mode) {
5862         case BNX2X_RX_MODE_NONE:
5863                 /*
5864                  * 'drop all' supersedes any accept flags that may have been
5865                  * passed to the function.
5866                  */
5867                 break;
5868         case BNX2X_RX_MODE_NORMAL:
5869                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5870                 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
5871                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5872
5873                 /* internal switching mode */
5874                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5875                 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
5876                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5877
5878                 break;
5879         case BNX2X_RX_MODE_ALLMULTI:
5880                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5881                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5882                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5883
5884                 /* internal switching mode */
5885                 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5886                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5887                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5888
5889                 break;
5890         case BNX2X_RX_MODE_PROMISC:
5891                 /* According to the definition of SI mode, an interface in
5892                  * promiscuous mode should receive both matched and
5893                  * unmatched (at port resolution) unicast packets.
5894                  */
5895                 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
5896                 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5897                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5898                 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
5899
5900                 /* internal switching mode */
5901                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5902                 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
5903
5904                 if (IS_MF_SI(bp))
5905                         __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
5906                 else
5907                         __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5908
5909                 break;
5910         default:
5911                 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
5912                 return -EINVAL;
5913         }
5914
5915         /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
5916         if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
5917                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
5918                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
5919         }
5920
5921         return 0;
5922 }
5923
5924 /* called with netif_addr_lock_bh() */
5925 int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5926 {
5927         unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5928         unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5929         int rc;
5930
5931         if (!NO_FCOE(bp))
5932                 /* Configure rx_mode of FCoE Queue */
5933                 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5934
5935         rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
5936                                      &tx_accept_flags);
5937         if (rc)
5938                 return rc;
5939
5940         __set_bit(RAMROD_RX, &ramrod_flags);
5941         __set_bit(RAMROD_TX, &ramrod_flags);
5942
5943         return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
5944                                    rx_accept_flags, tx_accept_flags,
5945                                    ramrod_flags);
5946 }
5947
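     /* Internal memory initialization performed only when the MCP returns a
      * COMMON (or COMMON_CHIP) load response, see bnx2x_init_internal() below.
      */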
5948 static void bnx2x_init_internal_common(struct bnx2x *bp)
5949 {
5950         int i;
5951
5952         if (IS_MF_SI(bp))
5953                 /*
5954                  * In switch independent mode, the TSTORM needs to accept
5955                  * packets that failed classification, since approximate match
5956                  * mac addresses aren't written to NIG LLH
5957                  */
5958                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5959                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
5960         else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5961                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5962                             TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
5963
5964         /* Zero this manually as its initialization is
5965            currently missing in the initTool */
5966         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5967                 REG_WR(bp, BAR_USTRORM_INTMEM +
5968                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5969         if (!CHIP_IS_E1x(bp)) {
5970                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5971                         CHIP_INT_MODE_IS_BC(bp) ?
5972                         HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5973         }
5974 }
5975
5976 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5977 {
5978         switch (load_code) {
5979         case FW_MSG_CODE_DRV_LOAD_COMMON:
5980         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5981                 bnx2x_init_internal_common(bp);
5982                 /* no break */
5983
5984         case FW_MSG_CODE_DRV_LOAD_PORT:
5985                 /* nothing to do */
5986                 /* no break */
5987
5988         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5989                 /* internal memory per function is
5990                    initialized inside bnx2x_pf_init */
5991                 break;
5992
5993         default:
5994                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5995                 break;
5996         }
5997 }
5998
5999 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6000 {
6001         return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6002 }
6003
6004 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6005 {
6006         return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6007 }
6008
6009 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6010 {
6011         if (CHIP_IS_E1x(fp->bp))
6012                 return BP_L_ID(fp->bp) + fp->index;
6013         else    /* We want Client ID to be the same as IGU SB ID for 57712 */
6014                 return bnx2x_fp_igu_sb_id(fp);
6015 }
6016
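     /* Per-queue init: assign the CID, client/SB ids and per-COS TX data;
      * for a PF also set up the status block, the queue state object and the
      * classification (MAC/VLAN) objects.
      */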
6017 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6018 {
6019         struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6020         u8 cos;
6021         unsigned long q_type = 0;
6022         u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6023         fp->rx_queue = fp_idx;
6024         fp->cid = fp_idx;
6025         fp->cl_id = bnx2x_fp_cl_id(fp);
6026         fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6027         fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6028         /* qZone id equals to FW (per path) client id */
6029         fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
6030
6031         /* init shortcut */
6032         fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6033
6034         /* Setup SB indices */
6035         fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6036
6037         /* Configure Queue State object */
6038         __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6039         __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6040
6041         BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6042
6043         /* init tx data */
6044         for_each_cos_in_tx_queue(fp, cos) {
6045                 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6046                                   CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6047                                   FP_COS_TO_TXQ(fp, cos, bp),
6048                                   BNX2X_TX_SB_INDEX_BASE + cos, fp);
6049                 cids[cos] = fp->txdata_ptr[cos]->cid;
6050         }
6051
6052         /* nothing more for vf to do here */
6053         if (IS_VF(bp))
6054                 return;
6055
6056         bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6057                       fp->fw_sb_id, fp->igu_sb_id);
6058         bnx2x_update_fpsb_idx(fp);
6059         bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6060                              fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6061                              bnx2x_sp_mapping(bp, q_rdata), q_type);
6062
6063         /*
6064          * Configure classification DBs: Always enable Tx switching
6065          */
6066         bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6067
6068         DP(NETIF_MSG_IFUP,
6069            "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  fw_sb %d  igu_sb %d\n",
6070            fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6071            fp->igu_sb_id);
6072 }
6073
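     /* Link the TX BD pages of one txdata ring into a circular list and
      * reset its producer/consumer indices and doorbell data.
      */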
6074 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6075 {
6076         int i;
6077
6078         for (i = 1; i <= NUM_TX_RINGS; i++) {
6079                 struct eth_tx_next_bd *tx_next_bd =
6080                         &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6081
6082                 tx_next_bd->addr_hi =
6083                         cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6084                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6085                 tx_next_bd->addr_lo =
6086                         cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6087                                     BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6088         }
6089
6090         *txdata->tx_cons_sb = cpu_to_le16(0);
6091
6092         SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6093         txdata->tx_db.data.zero_fill1 = 0;
6094         txdata->tx_db.data.prod = 0;
6095
6096         txdata->tx_pkt_prod = 0;
6097         txdata->tx_pkt_cons = 0;
6098         txdata->tx_bd_prod = 0;
6099         txdata->tx_bd_cons = 0;
6100         txdata->tx_pkt = 0;
6101 }
6102
6103 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6104 {
6105         int i;
6106
6107         for_each_tx_queue_cnic(bp, i)
6108                 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6109 }
6110
6111 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6112 {
6113         int i;
6114         u8 cos;
6115
6116         for_each_eth_queue(bp, i)
6117                 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6118                         bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6119 }
6120
6121 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6122 {
6123         if (!NO_FCOE(bp))
6124                 bnx2x_init_fcoe_fp(bp);
6125
6126         bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6127                       BNX2X_VF_ID_INVALID, false,
6128                       bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6129
6130         /* ensure status block indices were read */
6131         rmb();
6132         bnx2x_init_rx_rings_cnic(bp);
6133         bnx2x_init_tx_rings_cnic(bp);
6134
6135         /* flush all */
6136         mb();
6137         mmiowb();
6138 }
6139
6140 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6141 {
6142         int i;
6143
6144         /* Setup NIC internals and enable interrupts */
6145         for_each_eth_queue(bp, i)
6146                 bnx2x_init_eth_fp(bp, i);
6147
6148         /* ensure status block indices were read */
6149         rmb();
6150         bnx2x_init_rx_rings(bp);
6151         bnx2x_init_tx_rings(bp);
6152
6153         if (IS_PF(bp)) {
6154                 /* Initialize MOD_ABS interrupts */
6155                 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6156                                        bp->common.shmem_base,
6157                                        bp->common.shmem2_base, BP_PORT(bp));
6158
6159                 /* initialize the default status block and sp ring */
6160                 bnx2x_init_def_sb(bp);
6161                 bnx2x_update_dsb_idx(bp);
6162                 bnx2x_init_sp_ring(bp);
6163         } else {
6164                 bnx2x_memset_stats(bp);
6165         }
6166 }
6167
6168 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6169 {
6170         bnx2x_init_eq_ring(bp);
6171         bnx2x_init_internal(bp, load_code);
6172         bnx2x_pf_init(bp);
6173         bnx2x_stats_init(bp);
6174
6175         /* flush all before enabling interrupts */
6176         mb();
6177         mmiowb();
6178
6179         bnx2x_int_enable(bp);
6180
6181         /* Check for SPIO5 */
6182         bnx2x_attn_int_deasserted0(bp,
6183                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6184                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6185 }
6186
6187 /* gzip service functions */
6188 static int bnx2x_gunzip_init(struct bnx2x *bp)
6189 {
6190         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6191                                             &bp->gunzip_mapping, GFP_KERNEL);
6192         if (bp->gunzip_buf  == NULL)
6193                 goto gunzip_nomem1;
6194
6195         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6196         if (bp->strm  == NULL)
6197                 goto gunzip_nomem2;
6198
6199         bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6200         if (bp->strm->workspace == NULL)
6201                 goto gunzip_nomem3;
6202
6203         return 0;
6204
6205 gunzip_nomem3:
6206         kfree(bp->strm);
6207         bp->strm = NULL;
6208
6209 gunzip_nomem2:
6210         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6211                           bp->gunzip_mapping);
6212         bp->gunzip_buf = NULL;
6213
6214 gunzip_nomem1:
6215         BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6216         return -ENOMEM;
6217 }
6218
6219 static void bnx2x_gunzip_end(struct bnx2x *bp)
6220 {
6221         if (bp->strm) {
6222                 vfree(bp->strm->workspace);
6223                 kfree(bp->strm);
6224                 bp->strm = NULL;
6225         }
6226
6227         if (bp->gunzip_buf) {
6228                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6229                                   bp->gunzip_mapping);
6230                 bp->gunzip_buf = NULL;
6231         }
6232 }
6233
6234 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6235 {
6236         int n, rc;
6237
6238         /* check gzip header */
6239         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6240                 BNX2X_ERR("Bad gzip header\n");
6241                 return -EINVAL;
6242         }
6243
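             /* The fixed gzip header is 10 bytes; if the FNAME flag is set,
              * skip the NUL-terminated original file name that follows before
              * handing the raw deflate stream to zlib (inflateInit2() with
              * -MAX_WBITS below means "no zlib/gzip header").
              */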
6244         n = 10;
6245
6246 #define FNAME                           0x8
6247
6248         if (zbuf[3] & FNAME)
6249                 while ((zbuf[n++] != 0) && (n < len));
6250
6251         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6252         bp->strm->avail_in = len - n;
6253         bp->strm->next_out = bp->gunzip_buf;
6254         bp->strm->avail_out = FW_BUF_SIZE;
6255
6256         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6257         if (rc != Z_OK)
6258                 return rc;
6259
6260         rc = zlib_inflate(bp->strm, Z_FINISH);
6261         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6262                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6263                            bp->strm->msg);
6264
6265         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6266         if (bp->gunzip_outlen & 0x3)
6267                 netdev_err(bp->dev,
6268                            "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6269                                 bp->gunzip_outlen);
6270         bp->gunzip_outlen >>= 2;
6271
6272         zlib_inflateEnd(bp->strm);
6273
6274         if (rc == Z_STREAM_END)
6275                 return 0;
6276
6277         return rc;
6278 }
6279
6280 /* nic load/unload */
6281
6282 /*
6283  * General service functions
6284  */
6285
6286 /* send a NIG loopback debug packet */
6287 static void bnx2x_lb_pckt(struct bnx2x *bp)
6288 {
6289         u32 wb_write[3];
6290
6291         /* Ethernet source and destination addresses */
6292         wb_write[0] = 0x55555555;
6293         wb_write[1] = 0x55555555;
6294         wb_write[2] = 0x20;             /* SOP */
6295         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6296
6297         /* NON-IP protocol */
6298         wb_write[0] = 0x09000000;
6299         wb_write[1] = 0x55555555;
6300         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6301         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6302 }
6303
6304 /* Some of the internal memories are not directly readable from the
6305  * driver, so to test them we send debug packets through the chip and
6306  * check that the NIG and PRS counters advance as expected.
6307  */
6308 static int bnx2x_int_mem_test(struct bnx2x *bp)
6309 {
6310         int factor;
6311         int count, i;
6312         u32 val = 0;
6313
6314         if (CHIP_REV_IS_FPGA(bp))
6315                 factor = 120;
6316         else if (CHIP_REV_IS_EMUL(bp))
6317                 factor = 200;
6318         else
6319                 factor = 1;
6320
6321         /* Disable inputs of parser neighbor blocks */
6322         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6323         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6324         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6325         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6326
6327         /*  Write 0 to parser credits for CFC search request */
6328         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6329
6330         /* send Ethernet packet */
6331         bnx2x_lb_pckt(bp);
6332
6333         /* TODO: do we need to reset the NIG statistics here? */
6334         /* Wait until NIG register shows 1 packet of size 0x10 */
6335         count = 1000 * factor;
6336         while (count) {
6337
6338                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6339                 val = *bnx2x_sp(bp, wb_data[0]);
6340                 if (val == 0x10)
6341                         break;
6342
6343                 usleep_range(10000, 20000);
6344                 count--;
6345         }
6346         if (val != 0x10) {
6347                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6348                 return -1;
6349         }
6350
6351         /* Wait until PRS register shows 1 packet */
6352         count = 1000 * factor;
6353         while (count) {
6354                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6355                 if (val == 1)
6356                         break;
6357
6358                 usleep_range(10000, 20000);
6359                 count--;
6360         }
6361         if (val != 0x1) {
6362                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6363                 return -2;
6364         }
6365
6366         /* Reset and init BRB, PRS */
6367         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6368         msleep(50);
6369         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6370         msleep(50);
6371         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6372         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6373
6374         DP(NETIF_MSG_HW, "part2\n");
6375
6376         /* Disable inputs of parser neighbor blocks */
6377         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6378         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6379         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6380         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6381
6382         /* Write 0 to parser credits for CFC search request */
6383         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6384
6385         /* send 10 Ethernet packets */
6386         for (i = 0; i < 10; i++)
6387                 bnx2x_lb_pckt(bp);
6388
6389         /* Wait until NIG register shows 10 + 1
6390            packets of size 11*0x10 = 0xb0 */
6391         count = 1000 * factor;
6392         while (count) {
6393
6394                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6395                 val = *bnx2x_sp(bp, wb_data[0]);
6396                 if (val == 0xb0)
6397                         break;
6398
6399                 usleep_range(10000, 20000);
6400                 count--;
6401         }
6402         if (val != 0xb0) {
6403                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6404                 return -3;
6405         }
6406
6407         /* Wait until PRS register shows 2 packets */
6408         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6409         if (val != 2)
6410                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6411
6412         /* Write 1 to parser credits for CFC search request */
6413         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6414
6415         /* Wait until PRS register shows 3 packets */
6416         msleep(10 * factor);
6417         /* check that the extra packet has reached the PRS */
6418         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6419         if (val != 3)
6420                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6421
6422         /* clear NIG EOP FIFO */
6423         for (i = 0; i < 11; i++)
6424                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6425         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6426         if (val != 1) {
6427                 BNX2X_ERR("clear of NIG failed\n");
6428                 return -4;
6429         }
6430
6431         /* Reset and init BRB, PRS, NIG */
6432         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6433         msleep(50);
6434         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6435         msleep(50);
6436         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6437         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6438         if (!CNIC_SUPPORT(bp))
6439                 /* set NIC mode */
6440                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6441
6442         /* Enable inputs of parser neighbor blocks */
6443         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6444         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6445         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6446         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6447
6448         DP(NETIF_MSG_HW, "done\n");
6449
6450         return 0; /* OK */
6451 }
6452
6453 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6454 {
6455         u32 val;
6456
6457         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6458         if (!CHIP_IS_E1x(bp))
6459                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6460         else
6461                 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6462         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6463         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6464         /*
6465          * mask read length error interrupts in brb for parser
6466          * (parsing unit and 'checksum and crc' unit)
6467          * these errors are legal (PU reads fixed length and CAC can cause
6468          * read length error on truncated packets)
6469          */
6470         REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6471         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6472         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6473         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6474         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6475         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6476 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6477 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6478         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6479         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6480         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6481 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6482 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6483         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6484         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6485         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6486         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6487 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6488 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6489
6490         val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT  |
6491                 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6492                 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6493         if (!CHIP_IS_E1x(bp))
6494                 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6495                         PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6496         REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6497
6498         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6499         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6500         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6501 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6502
6503         if (!CHIP_IS_E1x(bp))
6504                 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6505                 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6506
6507         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6508         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6509 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6510         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
6511 }
6512
6513 static void bnx2x_reset_common(struct bnx2x *bp)
6514 {
6515         u32 val = 0x1400;
6516
6517         /* reset_common */
6518         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6519                0xd3ffff7f);
6520
6521         if (CHIP_IS_E3(bp)) {
6522                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6523                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6524         }
6525
6526         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6527 }
6528
6529 static void bnx2x_setup_dmae(struct bnx2x *bp)
6530 {
6531         bp->dmae_ready = 0;
6532         spin_lock_init(&bp->dmae_lock);
6533 }
6534
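     /* Derive the PXP write/read ordering from the PCIe Device Control
      * register (max payload size for the write order, max read request size
      * for the read order); bp->mrrs == -1 means use the value read from the
      * device, otherwise the read order is forced to bp->mrrs.
      */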
6535 static void bnx2x_init_pxp(struct bnx2x *bp)
6536 {
6537         u16 devctl;
6538         int r_order, w_order;
6539
6540         pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6541         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6542         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6543         if (bp->mrrs == -1)
6544                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6545         else {
6546                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6547                 r_order = bp->mrrs;
6548         }
6549
6550         bnx2x_init_pxp_arb(bp, r_order, w_order);
6551 }
6552
6553 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6554 {
6555         int is_required;
6556         u32 val;
6557         int port;
6558
6559         if (BP_NOMCP(bp))
6560                 return;
6561
6562         is_required = 0;
6563         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6564               SHARED_HW_CFG_FAN_FAILURE_MASK;
6565
6566         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6567                 is_required = 1;
6568
6569         /*
6570          * The fan failure mechanism is usually related to the PHY type since
6571          * the power consumption of the board is affected by the PHY. Currently,
6572          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6573          */
6574         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6575                 for (port = PORT_0; port < PORT_MAX; port++) {
6576                         is_required |=
6577                                 bnx2x_fan_failure_det_req(
6578                                         bp,
6579                                         bp->common.shmem_base,
6580                                         bp->common.shmem2_base,
6581                                         port);
6582                 }
6583
6584         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6585
6586         if (is_required == 0)
6587                 return;
6588
6589         /* Fan failure is indicated by SPIO 5 */
6590         bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6591
6592         /* set to active low mode */
6593         val = REG_RD(bp, MISC_REG_SPIO_INT);
6594         val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6595         REG_WR(bp, MISC_REG_SPIO_INT, val);
6596
6597         /* enable interrupt to signal the IGU */
6598         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6599         val |= MISC_SPIO_SPIO5;
6600         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6601 }
6602
6603 void bnx2x_pf_disable(struct bnx2x *bp)
6604 {
6605         u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6606         val &= ~IGU_PF_CONF_FUNC_EN;
6607
6608         REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6609         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6610         REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6611 }
6612
6613 static void bnx2x__common_init_phy(struct bnx2x *bp)
6614 {
6615         u32 shmem_base[2], shmem2_base[2];
6616         /* Avoid common init in case MFW supports LFA */
6617         if (SHMEM2_RD(bp, size) >
6618             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6619                 return;
6620         shmem_base[0] =  bp->common.shmem_base;
6621         shmem2_base[0] = bp->common.shmem2_base;
6622         if (!CHIP_IS_E1x(bp)) {
6623                 shmem_base[1] =
6624                         SHMEM2_RD(bp, other_shmem_base_addr);
6625                 shmem2_base[1] =
6626                         SHMEM2_RD(bp, other_shmem2_base_addr);
6627         }
6628         bnx2x_acquire_phy_lock(bp);
6629         bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6630                               bp->common.chip_id);
6631         bnx2x_release_phy_lock(bp);
6632 }
6633
6634 /**
6635  * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6636  *
6637  * @bp:         driver handle
6638  */
6639 static int bnx2x_init_hw_common(struct bnx2x *bp)
6640 {
6641         u32 val;
6642
6643         DP(NETIF_MSG_HW, "starting common init  func %d\n", BP_ABS_FUNC(bp));
6644
6645         /*
6646          * take the RESET lock to protect undi_unload flow from accessing
6647          * registers while we're resetting the chip
6648          */
6649         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6650
6651         bnx2x_reset_common(bp);
6652         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6653
6654         val = 0xfffc;
6655         if (CHIP_IS_E3(bp)) {
6656                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6657                 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6658         }
6659         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6660
6661         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
6662
6663         bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
6664
6665         if (!CHIP_IS_E1x(bp)) {
6666                 u8 abs_func_id;
6667
6668                 /* In both 4-port and 2-port modes we need to turn off
6669                  * master-enable for everyone and, after that, turn it back
6670                  * on for ourselves.  So, regardless of multi-function mode,
6671                  * always disable it first for all functions on the given
6672                  * path; this means functions 0,2,4,6 for path 0 and
6673                  * 1,3,5,7 for path 1.
6674                  */
6675                 for (abs_func_id = BP_PATH(bp);
6676                      abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6677                         if (abs_func_id == BP_ABS_FUNC(bp)) {
6678                                 REG_WR(bp,
6679                                     PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6680                                     1);
6681                                 continue;
6682                         }
6683
6684                         bnx2x_pretend_func(bp, abs_func_id);
6685                         /* clear pf enable */
6686                         bnx2x_pf_disable(bp);
6687                         bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6688                 }
6689         }
6690
6691         bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
6692         if (CHIP_IS_E1(bp)) {
6693                 /* enable HW interrupt from PXP on USDM overflow
6694                    bit 16 on INT_MASK_0 */
6695                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6696         }
6697
6698         bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
6699         bnx2x_init_pxp(bp);
6700
6701 #ifdef __BIG_ENDIAN
6702         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6703         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6704         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6705         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6706         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6707         /* make sure this value is 0 */
6708         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6709
6710 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6711         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6712         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6713         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6714         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6715 #endif
6716
6717         bnx2x_ilt_init_page_size(bp, INITOP_SET);
6718
6719         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6720                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6721
6722         /* let the HW do its magic ... */
6723         msleep(100);
6724         /* finish PXP init */
6725         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6726         if (val != 1) {
6727                 BNX2X_ERR("PXP2 CFG failed\n");
6728                 return -EBUSY;
6729         }
6730         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6731         if (val != 1) {
6732                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6733                 return -EBUSY;
6734         }
6735
6736         /* Timers bug workaround E2 only. We need to set the entire ILT to
6737          * have entries with value "0" and valid bit on.
6738          * This needs to be done by the first PF that is loaded in a path
6739          * (i.e. common phase)
6740          */
6741         if (!CHIP_IS_E1x(bp)) {
6742 /* In E2 there is a bug in the timers block that can cause function 6 / 7
6743  * (i.e. vnic3) to start even if it is marked as "scan-off".
6744  * This occurs when a different function (func2,3) is being marked
6745  * as "scan-off". A real-life scenario, for example: a driver being loaded
6746  * and unloaded while func6,7 are down. This will cause the timer to access
6747  * the ilt, translate to a logical address and send a request to read/write.
6748  * Since the ilt for the function that is down is not valid, this will cause
6749  * a translation error which is unrecoverable.
6750  * The Workaround is intended to make sure that when this happens nothing fatal
6751  * will occur. The workaround:
6752  *      1.  First PF driver which loads on a path will:
6753  *              a.  After taking the chip out of reset, by using pretend,
6754  *                  it will write "0" to the following registers of
6755  *                  the other vnics.
6756  *                  REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6757  *                  REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
6758  *                  REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
6759  *                  And for itself it will write '1' to
6760  *                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
6761  *                  dmae-operations (writing to pram for example.)
6762  *                  note: this could be done only for functions 6,7 but it
6763  *                        is cleaner this way.
6764  *              b.  Write zero+valid to the entire ILT.
6765  *              c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
6766  *                  VNIC3 (of that port). The range allocated will be the
6767  *                  entire ILT. This is needed to prevent an ILT range error.
6768  *      2.  Any PF driver load flow:
6769  *              a.  ILT update with the physical addresses of the allocated
6770  *                  logical pages.
6771  *              b.  Wait 20msec. - note that this timeout is needed to make
6772  *                  sure there are no requests in one of the PXP internal
6773  *                  queues with "old" ILT addresses.
6774  *              c.  PF enable in the PGLC.
6775  *              d.  Clear the was_error of the PF in the PGLC. (could have
6776  *                  occurred while driver was down)
6777  *              e.  PF enable in the CFC (WEAK + STRONG)
6778  *              f.  Timers scan enable
6779  *      3.  PF driver unload flow:
6780  *              a.  Clear the Timers scan_en.
6781  *              b.  Polling for scan_on=0 for that PF.
6782  *              c.  Clear the PF enable bit in the PXP.
6783  *              d.  Clear the PF enable in the CFC (WEAK + STRONG)
6784  *              e.  Write zero+valid to all ILT entries (The valid bit must
6785  *                  stay set)
6786  *              f.  If this is VNIC 3 of a port then also init
6787  *                  first_timers_ilt_entry to zero and last_timers_ilt_entry
6788  *                  to the last entry in the ILT.
6789  *
6790  *      Notes:
6791  *      Currently the PF error in the PGLC is non-recoverable.
6792  *      In the future there will be a recovery routine for this error.
6793  *      Currently attention is masked.
6794  *      Having an MCP lock on the load/unload process does not guarantee that
6795  *      there is no Timer disable during Func6/7 enable. This is because the
6796  *      Timers scan is currently being cleared by the MCP on FLR.
6797  *      Step 2.d can be done only for PF6/7 and the driver can also check if
6798  *      there is an error before clearing it. But the flow above is simpler and
6799  *      more general.
6800  *      All ILT entries are written with zero+valid and not just the PF6/7
6801  *      ILT entries, since in the future the ILT entry allocation for
6802  *      PFs might be dynamic.
6803  */
6804                 struct ilt_client_info ilt_cli;
6805                 struct bnx2x_ilt ilt;
6806                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6807                 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6808
6809                 /* initialize dummy TM client */
6810                 ilt_cli.start = 0;
6811                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6812                 ilt_cli.client_num = ILT_CLIENT_TM;
6813
6814                 /* Step 1: set zeroes to all ilt page entries with valid bit on
6815                  * Step 2: set the timers first/last ilt entry to point
6816                  * to the entire range to prevent ILT range error for 3rd/4th
6817                  * vnic (this code assumes existence of the vnic)
6818                  *
6819                  * both steps performed by call to bnx2x_ilt_client_init_op()
6820                  * with dummy TM client
6821                  *
6822                  * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
6823                  * and its counterpart are split registers
6824                  */
6825                 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6826                 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6827                 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
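                /* Per the path layout described earlier (functions 0,2,4,6 on
                 * path 0 and 1,3,5,7 on path 1), BP_PATH(bp) + 6 is the
                 * absolute function id of vnic3 on this path: the ILT clear
                 * above runs while pretending to be that function, and the
                 * pretend register is then restored to our own function id.
                 */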
6828
6829                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6830                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6831                 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6832         }
6833
6834         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6835         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6836
6837         if (!CHIP_IS_E1x(bp)) {
6838                 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6839                                 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
6840                 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
6841
6842                 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
6843
6844                 /* let the HW do its magic ... */
6845                 do {
6846                         msleep(200);
6847                         val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6848                 } while (factor-- && (val != 1));
6849
6850                 if (val != 1) {
6851                         BNX2X_ERR("ATC_INIT failed\n");
6852                         return -EBUSY;
6853                 }
6854         }
6855
6856         bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
6857
6858         bnx2x_iov_init_dmae(bp);
6859
6860         /* clean the DMAE memory */
6861         bp->dmae_ready = 1;
6862         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6863
6864         bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6865
6866         bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6867
6868         bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
6869
6870         bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
6871
6872         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6873         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6874         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6875         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6876
6877         bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
6878
6879         /* QM queues pointers table */
6880         bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6881
6882         /* soft reset pulse */
6883         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6884         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6885
6886         if (CNIC_SUPPORT(bp))
6887                 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
6888
6889         bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
6890         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
6891         if (!CHIP_REV_IS_SLOW(bp))
6892                 /* enable hw interrupt from doorbell Q */
6893                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6894
6895         bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6896
6897         bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6898         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6899
6900         if (!CHIP_IS_E1(bp))
6901                 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6902
6903         if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6904                 if (IS_MF_AFEX(bp)) {
6905                         /* configure so that VNTag and VLAN headers must be
6906                          * received in afex mode
6907                          */
6908                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6909                         REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6910                         REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6911                         REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6912                         REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6913                 } else {
6914                         /* Bit-map indicating which L2 hdrs may appear
6915                          * after the basic Ethernet header
6916                          */
6917                         REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6918                                bp->path_has_ovlan ? 7 : 6);
6919                 }
6920         }
6921
6922         bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6923         bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6924         bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6925         bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
6926
6927         if (!CHIP_IS_E1x(bp)) {
6928                 /* reset VFC memories */
6929                 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6930                            VFC_MEMORIES_RST_REG_CAM_RST |
6931                            VFC_MEMORIES_RST_REG_RAM_RST);
6932                 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6933                            VFC_MEMORIES_RST_REG_CAM_RST |
6934                            VFC_MEMORIES_RST_REG_RAM_RST);
6935
6936                 msleep(20);
6937         }
6938
6939         bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6940         bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6941         bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6942         bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
6943
6944         /* sync semi rtc */
6945         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6946                0x80000000);
6947         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6948                0x80000000);
6949
6950         bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6951         bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6952         bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6953
6954         if (!CHIP_IS_E1x(bp)) {
6955                 if (IS_MF_AFEX(bp)) {
6956                         /* configure so that VNTag and VLAN headers must be
6957                          * sent in afex mode
6958                          */
6959                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6960                         REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6961                         REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6962                         REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6963                         REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6964                 } else {
6965                         REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6966                                bp->path_has_ovlan ? 7 : 6);
6967                 }
6968         }
6969
6970         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6971
6972         bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6973
6974         if (CNIC_SUPPORT(bp)) {
6975                 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6976                 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6977                 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6978                 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6979                 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6980                 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6981                 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6982                 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6983                 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6984                 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6985         }
6986         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6987
6988         if (sizeof(union cdu_context) != 1024)
6989                 /* we currently assume that a context is 1024 bytes */
6990                 dev_alert(&bp->pdev->dev,
6991                           "please adjust the size of cdu_context(%ld)\n",
6992                           (long)sizeof(union cdu_context));
6993
6994         bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
6995         val = (4 << 24) + (0 << 12) + 1024;
6996         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6997
6998         bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
6999         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7000         /* enable context validation interrupt from CFC */
7001         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7002
7003         /* set the thresholds to prevent CFC/CDU race */
7004         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7005
7006         bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7007
7008         if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7009                 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7010
7011         bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7012         bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7013
7014         /* Reset PCIE errors for debug */
7015         REG_WR(bp, 0x2814, 0xffffffff);
7016         REG_WR(bp, 0x3820, 0xffffffff);
7017
7018         if (!CHIP_IS_E1x(bp)) {
7019                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7020                            (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7021                                 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7022                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7023                            (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7024                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7025                                 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7026                 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7027                            (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7028                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7029                                 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7030         }
7031
7032         bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7033         if (!CHIP_IS_E1(bp)) {
7034                 /* in E3 this is done in the per-port section */
7035                 if (!CHIP_IS_E3(bp))
7036                         REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7037         }
7038         if (CHIP_IS_E1H(bp))
7039                 /* not applicable for E2 (and above ...) */
7040                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7041
7042         if (CHIP_REV_IS_SLOW(bp))
7043                 msleep(200);
7044
7045         /* finish CFC init */
7046         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7047         if (val != 1) {
7048                 BNX2X_ERR("CFC LL_INIT failed\n");
7049                 return -EBUSY;
7050         }
7051         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7052         if (val != 1) {
7053                 BNX2X_ERR("CFC AC_INIT failed\n");
7054                 return -EBUSY;
7055         }
7056         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7057         if (val != 1) {
7058                 BNX2X_ERR("CFC CAM_INIT failed\n");
7059                 return -EBUSY;
7060         }
7061         REG_WR(bp, CFC_REG_DEBUG0, 0);
7062
7063         if (CHIP_IS_E1(bp)) {
7064                 /* read NIG statistic
7065                    to see if this is our first bring-up since power-up */
7066                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7067                 val = *bnx2x_sp(bp, wb_data[0]);
7068
7069                 /* do internal memory self test */
7070                 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7071                         BNX2X_ERR("internal mem self test failed\n");
7072                         return -EBUSY;
7073                 }
7074         }
7075
7076         bnx2x_setup_fan_failure_detection(bp);
7077
7078         /* clear PXP2 attentions */
7079         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7080
7081         bnx2x_enable_blocks_attention(bp);
7082         bnx2x_enable_blocks_parity(bp);
7083
7084         if (!BP_NOMCP(bp)) {
7085                 if (CHIP_IS_E1x(bp))
7086                         bnx2x__common_init_phy(bp);
7087         } else
7088                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7089
7090         return 0;
7091 }
7092
7093 /**
7094  * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
7095  *
7096  * @bp:         driver handle
7097  */
7098 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7099 {
7100         int rc = bnx2x_init_hw_common(bp);
7101
7102         if (rc)
7103                 return rc;
7104
7105         /* In E2 2-PORT mode, the same ext phy is used for both paths */
7106         if (!BP_NOMCP(bp))
7107                 bnx2x__common_init_phy(bp);
7108
7109         return 0;
7110 }
7111
7112 static int bnx2x_init_hw_port(struct bnx2x *bp)
7113 {
7114         int port = BP_PORT(bp);
7115         int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7116         u32 low, high;
7117         u32 val;
7118
7119         DP(NETIF_MSG_HW, "starting port init  port %d\n", port);
7120
7121         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7122
7123         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7124         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7125         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7126
7127         /* Timers bug workaround: the common phase disables the pf_master
7128          * bit in pglue, so we need to enable it here before any dmae
7129          * accesses are attempted. Therefore we manually add the enable-
7130          * master to the port phase (it also happens in the function phase)
7131          */
7132         if (!CHIP_IS_E1x(bp))
7133                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7134
7135         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7136         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7137         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7138         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7139
7140         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7141         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7142         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7143         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7144
7145         /* QM cid (connection) count */
7146         bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7147
7148         if (CNIC_SUPPORT(bp)) {
7149                 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7150                 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7151                 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7152         }
7153
7154         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7155
7156         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7157
7158         if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7159
7160                 if (IS_MF(bp))
7161                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7162                 else if (bp->dev->mtu > 4096) {
7163                         if (bp->flags & ONE_PORT_FLAG)
7164                                 low = 160;
7165                         else {
7166                                 val = bp->dev->mtu;
7167                                 /* (24*1024 + val*4)/256 */
7168                                 low = 96 + (val/64) +
7169                                                 ((val % 64) ? 1 : 0);
7170                         }
7171                 } else
7172                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7173                 high = low + 56;        /* 14*1024/256 */
7174                 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7175                 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7176         }
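        /* Worked example (a sketch, assuming the thresholds are expressed in
         * 256-byte BRB blocks, as the "(24*1024 + val*4)/256" comment above
         * suggests): 24*1024/256 = 96 blocks of fixed headroom, plus
         * val*4/256 = val/64 blocks rounded up by the (val % 64) term.
         * For an assumed mtu of 9000 this gives low = 96 + 141 = 237 and
         * high = 237 + 56 = 293 (56 = 14*1024/256).
         */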
7177
7178         if (CHIP_MODE_IS_4_PORT(bp))
7179                 REG_WR(bp, (BP_PORT(bp) ?
7180                             BRB1_REG_MAC_GUARANTIED_1 :
7181                             BRB1_REG_MAC_GUARANTIED_0), 40);
7182
7183         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7184         if (CHIP_IS_E3B0(bp)) {
7185                 if (IS_MF_AFEX(bp)) {
7186                         /* configure headers for AFEX mode */
7187                         REG_WR(bp, BP_PORT(bp) ?
7188                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7189                                PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7190                         REG_WR(bp, BP_PORT(bp) ?
7191                                PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7192                                PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7193                         REG_WR(bp, BP_PORT(bp) ?
7194                                PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7195                                PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7196                 } else {
7197                         /* Ovlan exists only if we are in multi-function +
7198                          * switch-dependent mode; in switch-independent mode
7199                          * there are no ovlan headers
7200                          */
7201                         REG_WR(bp, BP_PORT(bp) ?
7202                                PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7203                                PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7204                                (bp->path_has_ovlan ? 7 : 6));
7205                 }
7206         }
7207
7208         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7209         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7210         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7211         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7212
7213         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7214         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7215         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7216         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7217
7218         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7219         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7220
7221         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7222
7223         if (CHIP_IS_E1x(bp)) {
7224                 /* configure PBF to work without PAUSE mtu 9000 */
7225                 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7226
7227                 /* update threshold */
7228                 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7229                 /* update init credit */
7230                 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7231
7232                 /* probe changes */
7233                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7234                 udelay(50);
7235                 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7236         }
7237
7238         if (CNIC_SUPPORT(bp))
7239                 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7240
7241         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7242         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7243
7244         if (CHIP_IS_E1(bp)) {
7245                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7246                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7247         }
7248         bnx2x_init_block(bp, BLOCK_HC, init_phase);
7249
7250         bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7251
7252         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7253         /* init aeu_mask_attn_func_0/1:
7254          *  - SF mode: bits 3-7 are masked. Only bits 0-2 are in use
7255          *  - MF mode: bit 3 is masked. Bits 0-2 are in use as in SF
7256          *             bits 4-7 are used for "per vn group attention" */
7257         val = IS_MF(bp) ? 0xF7 : 0x7;
7258         /* Enable DCBX attention for all but E1 */
7259         val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7260         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7261
7262         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7263
7264         if (!CHIP_IS_E1x(bp)) {
7265                 /* Bit-map indicating which L2 hdrs may appear after the
7266                  * basic Ethernet header
7267                  */
7268                 if (IS_MF_AFEX(bp))
7269                         REG_WR(bp, BP_PORT(bp) ?
7270                                NIG_REG_P1_HDRS_AFTER_BASIC :
7271                                NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7272                 else
7273                         REG_WR(bp, BP_PORT(bp) ?
7274                                NIG_REG_P1_HDRS_AFTER_BASIC :
7275                                NIG_REG_P0_HDRS_AFTER_BASIC,
7276                                IS_MF_SD(bp) ? 7 : 6);
7277
7278                 if (CHIP_IS_E3(bp))
7279                         REG_WR(bp, BP_PORT(bp) ?
7280                                    NIG_REG_LLH1_MF_MODE :
7281                                    NIG_REG_LLH_MF_MODE, IS_MF(bp));
7282         }
7283         if (!CHIP_IS_E3(bp))
7284                 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7285
7286         if (!CHIP_IS_E1(bp)) {
7287                 /* 0x2 disable mf_ov, 0x1 enable */
7288                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7289                        (IS_MF_SD(bp) ? 0x1 : 0x2));
7290
7291                 if (!CHIP_IS_E1x(bp)) {
7292                         val = 0;
7293                         switch (bp->mf_mode) {
7294                         case MULTI_FUNCTION_SD:
7295                                 val = 1;
7296                                 break;
7297                         case MULTI_FUNCTION_SI:
7298                         case MULTI_FUNCTION_AFEX:
7299                                 val = 2;
7300                                 break;
7301                         }
7302
7303                         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7304                                                   NIG_REG_LLH0_CLS_TYPE), val);
7305                 }
7306                 {
7307                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7308                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7309                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7310                 }
7311         }
7312
7313         /* If SPIO5 is set to generate interrupts, enable it for this port */
7314         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7315         if (val & MISC_SPIO_SPIO5) {
7316                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7317                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7318                 val = REG_RD(bp, reg_addr);
7319                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7320                 REG_WR(bp, reg_addr, val);
7321         }
7322
7323         return 0;
7324 }
7325
7326 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7327 {
7328         int reg;
7329         u32 wb_write[2];
7330
7331         if (CHIP_IS_E1(bp))
7332                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7333         else
7334                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7335
7336         wb_write[0] = ONCHIP_ADDR1(addr);
7337         wb_write[1] = ONCHIP_ADDR2(addr);
7338         REG_WR_DMAE(bp, reg, wb_write, 2);
7339 }
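/* Usage sketch for the helper above: the ILT entry behind
 * PXP2_REG_RQ_ONCHIP_AT(_B0) is written as a two-word wide-bus access, with
 * ONCHIP_ADDR1()/ONCHIP_ADDR2() splitting the dma address across the two
 * 32-bit words. bnx2x_clear_func_ilt() below, for instance, invalidates a
 * function's whole ILT range simply by calling
 *	bnx2x_ilt_wr(bp, i, 0);
 * for every line i in that range.
 */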
7340
7341 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7342 {
7343         u32 data, ctl, cnt = 100;
7344         u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7345         u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7346         u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7347         u32 sb_bit =  1 << (idu_sb_id%32);
7348         u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7349         u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7350
7351         /* Not supported in BC mode */
7352         if (CHIP_INT_MODE_IS_BC(bp))
7353                 return;
7354
7355         data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7356                         << IGU_REGULAR_CLEANUP_TYPE_SHIFT)      |
7357                 IGU_REGULAR_CLEANUP_SET                         |
7358                 IGU_REGULAR_BCLEANUP;
7359
7360         ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT         |
7361               func_encode << IGU_CTRL_REG_FID_SHIFT             |
7362               IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7363
7364         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7365                          data, igu_addr_data);
7366         REG_WR(bp, igu_addr_data, data);
7367         mmiowb();
7368         barrier();
7369         DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7370                           ctl, igu_addr_ctl);
7371         REG_WR(bp, igu_addr_ctl, ctl);
7372         mmiowb();
7373         barrier();
7374
7375         /* wait for clean up to finish */
7376         while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7377                 msleep(20);
7378
7379         if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7380                 DP(NETIF_MSG_HW,
7381                    "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7382                           idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7383         }
7384 }
7385
7386 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7387 {
7388         bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
7389 }
7390
7391 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7392 {
7393         u32 i, base = FUNC_ILT_BASE(func);
7394         for (i = base; i < base + ILT_PER_FUNC; i++)
7395                 bnx2x_ilt_wr(bp, i, 0);
7396 }
7397
7398 static void bnx2x_init_searcher(struct bnx2x *bp)
7399 {
7400         int port = BP_PORT(bp);
7401         bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7402         /* T1 hash bits value determines the T1 number of entries */
7403         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7404 }
7405
7406 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7407 {
7408         int rc;
7409         struct bnx2x_func_state_params func_params = {NULL};
7410         struct bnx2x_func_switch_update_params *switch_update_params =
7411                 &func_params.params.switch_update;
7412
7413         /* Prepare parameters for function state transitions */
7414         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7415         __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7416
7417         func_params.f_obj = &bp->func_obj;
7418         func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7419
7420         /* Function parameters */
7421         switch_update_params->suspend = suspend;
7422
7423         rc = bnx2x_func_state_change(bp, &func_params);
7424
7425         return rc;
7426 }
7427
7428 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7429 {
7430         int rc, i, port = BP_PORT(bp);
7431         int vlan_en = 0, mac_en[NUM_MACS];
7432
7433         /* Close input from network */
7434         if (bp->mf_mode == SINGLE_FUNCTION) {
7435                 bnx2x_set_rx_filter(&bp->link_params, 0);
7436         } else {
7437                 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7438                                    NIG_REG_LLH0_FUNC_EN);
7439                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7440                           NIG_REG_LLH0_FUNC_EN, 0);
7441                 for (i = 0; i < NUM_MACS; i++) {
7442                         mac_en[i] = REG_RD(bp, port ?
7443                                              (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7444                                               4 * i) :
7445                                              (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7446                                               4 * i));
7447                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7448                                               4 * i) :
7449                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7450                 }
7451         }
7452
7453         /* Close BMC to host */
7454         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7455                NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7456
7457         /* Suspend Tx switching to the PF. Completion of this ramrod
7458          * further guarantees that all the packets of that PF / child
7459          * VFs in BRB were processed by the Parser, so it is safe to
7460          * change the NIC_MODE register.
7461          */
7462         rc = bnx2x_func_switch_update(bp, 1);
7463         if (rc) {
7464                 BNX2X_ERR("Can't suspend tx-switching!\n");
7465                 return rc;
7466         }
7467
7468         /* Change NIC_MODE register */
7469         REG_WR(bp, PRS_REG_NIC_MODE, 0);
7470
7471         /* Open input from network */
7472         if (bp->mf_mode == SINGLE_FUNCTION) {
7473                 bnx2x_set_rx_filter(&bp->link_params, 1);
7474         } else {
7475                 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7476                           NIG_REG_LLH0_FUNC_EN, vlan_en);
7477                 for (i = 0; i < NUM_MACS; i++) {
7478                         REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7479                                               4 * i) :
7480                                   (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7481                                   mac_en[i]);
7482                 }
7483         }
7484
7485         /* Enable BMC to host */
7486         REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7487                NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7488
7489         /* Resume Tx switching to the PF */
7490         rc = bnx2x_func_switch_update(bp, 0);
7491         if (rc) {
7492                 BNX2X_ERR("Can't resume tx-switching!\n");
7493                 return rc;
7494         }
7495
7496         DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7497         return 0;
7498 }
7499
7500 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7501 {
7502         int rc;
7503
7504         bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7505
7506         if (CONFIGURE_NIC_MODE(bp)) {
7507                 /* Configure searcher as part of function hw init */
7508                 bnx2x_init_searcher(bp);
7509
7510                 /* Reset NIC mode */
7511                 rc = bnx2x_reset_nic_mode(bp);
7512                 if (rc)
7513                         BNX2X_ERR("Can't change NIC mode!\n");
7514                 return rc;
7515         }
7516
7517         return 0;
7518 }
7519
7520 static int bnx2x_init_hw_func(struct bnx2x *bp)
7521 {
7522         int port = BP_PORT(bp);
7523         int func = BP_FUNC(bp);
7524         int init_phase = PHASE_PF0 + func;
7525         struct bnx2x_ilt *ilt = BP_ILT(bp);
7526         u16 cdu_ilt_start;
7527         u32 addr, val;
7528         u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7529         int i, main_mem_width, rc;
7530
7531         DP(NETIF_MSG_HW, "starting func init  func %d\n", func);
7532
7533         /* FLR cleanup */
7534         if (!CHIP_IS_E1x(bp)) {
7535                 rc = bnx2x_pf_flr_clnup(bp);
7536                 if (rc) {
7537                         bnx2x_fw_dump(bp);
7538                         return rc;
7539                 }
7540         }
7541
7542         /* set MSI reconfigure capability */
7543         if (bp->common.int_block == INT_BLOCK_HC) {
7544                 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7545                 val = REG_RD(bp, addr);
7546                 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7547                 REG_WR(bp, addr, val);
7548         }
7549
7550         bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7551         bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7552
7553         ilt = BP_ILT(bp);
7554         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7555
7556         if (IS_SRIOV(bp))
7557                 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7558         cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7559
7560         /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precede
7561          * those of the VFs, so the start line should be reset
7562          */
7563         cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7564         for (i = 0; i < L2_ILT_LINES(bp); i++) {
7565                 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7566                 ilt->lines[cdu_ilt_start + i].page_mapping =
7567                         bp->context[i].cxt_mapping;
7568                 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
7569         }
7570
7571         bnx2x_ilt_init_op(bp, INITOP_SET);
7572
7573         if (!CONFIGURE_NIC_MODE(bp)) {
7574                 bnx2x_init_searcher(bp);
7575                 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7576                 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7577         } else {
7578                 /* Set NIC mode */
7579                 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7580                 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
7581         }
7582
7583         if (!CHIP_IS_E1x(bp)) {
7584                 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7585
7586                 /* Turn on a single ISR mode in IGU if driver is going to use
7587                  * INT#x or MSI
7588                  */
7589                 if (!(bp->flags & USING_MSIX_FLAG))
7590                         pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7591                 /*
7592                  * Timers bug workaround: function init part.
7593                  * We need to wait 20msec after initializing the ILT to
7594                  * make sure there are no requests in one of the PXP
7595                  * internal queues with "old" ILT addresses
7596                  */
7597                 msleep(20);
7598                 /*
7599                  * Master enable - needed here due to WB DMAE writes
7600                  * performed before this register is re-initialized as
7601                  * part of the regular function init
7602                  */
7603                 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7604                 /* Enable the function in IGU */
7605                 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7606         }
7607
7608         bp->dmae_ready = 1;
7609
7610         bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7611
7612         if (!CHIP_IS_E1x(bp))
7613                 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7614
7615         bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7616         bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7617         bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7618         bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7619         bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7620         bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7621         bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7622         bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7623         bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7624         bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7625         bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7626         bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7627         bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7628
7629         if (!CHIP_IS_E1x(bp))
7630                 REG_WR(bp, QM_REG_PF_EN, 1);
7631
7632         if (!CHIP_IS_E1x(bp)) {
7633                 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7634                 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7635                 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7636                 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7637         }
7638         bnx2x_init_block(bp, BLOCK_QM, init_phase);
7639
7640         bnx2x_init_block(bp, BLOCK_TM, init_phase);
7641         bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7642
7643         bnx2x_iov_init_dq(bp);
7644
7645         bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7646         bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7647         bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7648         bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7649         bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7650         bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7651         bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7652         bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7653         bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7654         if (!CHIP_IS_E1x(bp))
7655                 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7656
7657         bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7658
7659         bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7660
7661         if (!CHIP_IS_E1x(bp))
7662                 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7663
7664         if (IS_MF(bp)) {
7665                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7666                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
7667         }
7668
7669         bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7670
7671         /* HC init per function */
7672         if (bp->common.int_block == INT_BLOCK_HC) {
7673                 if (CHIP_IS_E1H(bp)) {
7674                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7675
7676                         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7677                         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7678                 }
7679                 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7680
7681         } else {
7682                 int num_segs, sb_idx, prod_offset;
7683
7684                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7685
7686                 if (!CHIP_IS_E1x(bp)) {
7687                         REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7688                         REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7689                 }
7690
7691                 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7692
7693                 if (!CHIP_IS_E1x(bp)) {
7694                         int dsb_idx = 0;
7695                         /*
7696                          * Producer memory:
7697                          * E2 mode: address 0-135 match to the mapping memory;
7698                          * 136 - PF0 default prod; 137 - PF1 default prod;
7699                          * 138 - PF2 default prod; 139 - PF3 default prod;
7700                          * 140 - PF0 attn prod;    141 - PF1 attn prod;
7701                          * 142 - PF2 attn prod;    143 - PF3 attn prod;
7702                          * 144-147 reserved.
7703                          *
7704                          * E1.5 mode - in backward compatible mode,
7705                          * for a non-default SB, each even line in the memory
7706                          * holds the U producer and each odd line holds
7707                          * the C producer. The first 128 producers are for
7708                          * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
7709                          * producers are for the DSB for each PF.
7710                          * Each PF has five segments: (the order inside each
7711                          * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
7712                          * 132-135 C prods; 136-139 X prods; 140-143 T prods;
7713                          * 144-147 attn prods;
7714                          */
7715                         /* non-default-status-blocks */
7716                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7717                                 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7718                         for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7719                                 prod_offset = (bp->igu_base_sb + sb_idx) *
7720                                         num_segs;
7721
7722                                 for (i = 0; i < num_segs; i++) {
7723                                         addr = IGU_REG_PROD_CONS_MEMORY +
7724                                                         (prod_offset + i) * 4;
7725                                         REG_WR(bp, addr, 0);
7726                                 }
7727                                 /* send consumer update with value 0 */
7728                                 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7729                                              USTORM_ID, 0, IGU_INT_NOP, 1);
7730                                 bnx2x_igu_clear_sb(bp,
7731                                                    bp->igu_base_sb + sb_idx);
7732                         }
7733
7734                         /* default-status-blocks */
7735                         num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7736                                 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7737
7738                         if (CHIP_MODE_IS_4_PORT(bp))
7739                                 dsb_idx = BP_FUNC(bp);
7740                         else
7741                                 dsb_idx = BP_VN(bp);
7742
7743                         prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7744                                        IGU_BC_BASE_DSB_PROD + dsb_idx :
7745                                        IGU_NORM_BASE_DSB_PROD + dsb_idx);
7746
7747                         /*
7748                          * igu prods come in chunks of E1HVN_MAX (4) -
7749                          * it does not matter what the current chip mode is
7750                          */
7751                         for (i = 0; i < (num_segs * E1HVN_MAX);
7752                              i += E1HVN_MAX) {
7753                                 addr = IGU_REG_PROD_CONS_MEMORY +
7754                                                         (prod_offset + i)*4;
7755                                 REG_WR(bp, addr, 0);
7756                         }
7757                         /* send consumer update with 0 */
7758                         if (CHIP_INT_MODE_IS_BC(bp)) {
7759                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7760                                              USTORM_ID, 0, IGU_INT_NOP, 1);
7761                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7762                                              CSTORM_ID, 0, IGU_INT_NOP, 1);
7763                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7764                                              XSTORM_ID, 0, IGU_INT_NOP, 1);
7765                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7766                                              TSTORM_ID, 0, IGU_INT_NOP, 1);
7767                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7768                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
7769                         } else {
7770                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7771                                              USTORM_ID, 0, IGU_INT_NOP, 1);
7772                                 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7773                                              ATTENTION_ID, 0, IGU_INT_NOP, 1);
7774                         }
7775                         bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7776
7777                         /* !!! These should become driver const once
7778                            rf-tool supports split-68 const */
7779                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7780                         REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7781                         REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7782                         REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7783                         REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7784                         REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7785                 }
7786         }
7787
7788         /* Reset PCIE errors for debug */
7789         REG_WR(bp, 0x2114, 0xffffffff);
7790         REG_WR(bp, 0x2120, 0xffffffff);
7791
7792         if (CHIP_IS_E1x(bp)) {
7793                 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
7794                 main_mem_base = HC_REG_MAIN_MEMORY +
7795                                 BP_PORT(bp) * (main_mem_size * 4);
7796                 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7797                 main_mem_width = 8;
7798
7799                 val = REG_RD(bp, main_mem_prty_clr);
7800                 if (val)
7801                         DP(NETIF_MSG_HW,
7802                            "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7803                            val);
7804
7805                 /* Clear "false" parity errors in MSI-X table */
7806                 for (i = main_mem_base;
7807                      i < main_mem_base + main_mem_size * 4;
7808                      i += main_mem_width) {
7809                         bnx2x_read_dmae(bp, i, main_mem_width / 4);
7810                         bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7811                                          i, main_mem_width / 4);
7812                 }
7813                 /* Clear HC parity attention */
7814                 REG_RD(bp, main_mem_prty_clr);
7815         }
7816
7817 #ifdef BNX2X_STOP_ON_ERROR
7818         /* Enable STORMs SP logging */
7819         REG_WR8(bp, BAR_USTRORM_INTMEM +
7820                USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7821         REG_WR8(bp, BAR_TSTRORM_INTMEM +
7822                TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7823         REG_WR8(bp, BAR_CSTRORM_INTMEM +
7824                CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7825         REG_WR8(bp, BAR_XSTRORM_INTMEM +
7826                XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7827 #endif
7828
7829         bnx2x_phy_probe(&bp->link_params);
7830
7831         return 0;
7832 }
7833
7834 void bnx2x_free_mem_cnic(struct bnx2x *bp)
7835 {
7836         bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7837
7838         if (!CHIP_IS_E1x(bp))
7839                 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7840                                sizeof(struct host_hc_status_block_e2));
7841         else
7842                 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7843                                sizeof(struct host_hc_status_block_e1x));
7844
7845         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7846 }
7847
7848 void bnx2x_free_mem(struct bnx2x *bp)
7849 {
7850         int i;
7851
7852         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7853                        sizeof(struct host_sp_status_block));
7854
7855         BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7856                        bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7857
7858         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7859                        sizeof(struct bnx2x_slowpath));
7860
7861         for (i = 0; i < L2_ILT_LINES(bp); i++)
7862                 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7863                                bp->context[i].size);
7864         bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7865
7866         BNX2X_FREE(bp->ilt->lines);
7867
7868         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7869
7870         BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7871                        BCM_PAGE_SIZE * NUM_EQ_PAGES);
7872
7873         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7874
7875         bnx2x_iov_free_mem(bp);
7876 }
7877
7878 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
7879 {
7880         if (!CHIP_IS_E1x(bp))
7881                 /* size = the status block + ramrod buffers */
7882                 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7883                                 sizeof(struct host_hc_status_block_e2));
7884         else
7885                 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7886                                 &bp->cnic_sb_mapping,
7887                                 sizeof(struct
7888                                        host_hc_status_block_e1x));
7889
7890         if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
7891                 /* allocate searcher T2 table, as it wasn't allocated before */
7892                 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7893
7894         /* write address to which L5 should insert its values */
7895         bp->cnic_eth_dev.addr_drv_info_to_mcp =
7896                 &bp->slowpath->drv_info_to_mcp;
7897
7898         if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7899                 goto alloc_mem_err;
7900
7901         return 0;
7902
7903 alloc_mem_err:
7904         bnx2x_free_mem_cnic(bp);
7905         BNX2X_ERR("Can't allocate memory\n");
7906         return -ENOMEM;
7907 }
7908
7909 int bnx2x_alloc_mem(struct bnx2x *bp)
7910 {
7911         int i, allocated, context_size;
7912
7913         if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
7914                 /* allocate searcher T2 table */
7915                 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7916
7917         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7918                         sizeof(struct host_sp_status_block));
7919
7920         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7921                         sizeof(struct bnx2x_slowpath));
7922
7923         /* Allocate memory for CDU context:
7924          * This memory is allocated separately and not in the generic ILT
7925          * functions because CDU differs in a few aspects:
7926          * 1. There are multiple entities allocating memory for context -
7927          * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7928          * its own ILT lines.
7929          * 2. Since CDU page-size is not a single 4KB page (which is the case
7930          * for the other ILT clients), to be efficient we want to support
7931          * allocation of sub-page-size in the last entry.
7932          * 3. Context pointers are used by the driver to pass to FW / update
7933          * the context (for the other ILT clients the pointers are used just to
7934          * free the memory during unload).
7935          */
7936         context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
7937
7938         for (i = 0, allocated = 0; allocated < context_size; i++) {
7939                 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7940                                           (context_size - allocated));
7941                 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7942                                 &bp->context[i].cxt_mapping,
7943                                 bp->context[i].size);
7944                 allocated += bp->context[i].size;
7945         }
7946         BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
7947
7948         if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
7949                 goto alloc_mem_err;
7950
7951         if (bnx2x_iov_alloc_mem(bp))
7952                 goto alloc_mem_err;
7953
7954         /* Slow path ring */
7955         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7956
7957         /* EQ */
7958         BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
7959                         BCM_PAGE_SIZE * NUM_EQ_PAGES);
7960
7961         return 0;
7962
7963 alloc_mem_err:
7964         bnx2x_free_mem(bp);
7965         BNX2X_ERR("Can't allocate memory\n");
7966         return -ENOMEM;
7967 }
7968
7969 /*
7970  * Init service functions
7971  */
7972
7973 int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
7974                       struct bnx2x_vlan_mac_obj *obj, bool set,
7975                       int mac_type, unsigned long *ramrod_flags)
7976 {
7977         int rc;
7978         struct bnx2x_vlan_mac_ramrod_params ramrod_param;
7979
7980         memset(&ramrod_param, 0, sizeof(ramrod_param));
7981
7982         /* Fill general parameters */
7983         ramrod_param.vlan_mac_obj = obj;
7984         ramrod_param.ramrod_flags = *ramrod_flags;
7985
7986         /* Fill a user request section if needed */
7987         if (!test_bit(RAMROD_CONT, ramrod_flags)) {
7988                 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
7989
7990                 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
7991
7992                 /* Set the command: ADD or DEL */
7993                 if (set)
7994                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
7995                 else
7996                         ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
7997         }
7998
7999         rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8000
8001         if (rc == -EEXIST) {
8002                 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8003                 /* do not treat adding same MAC as error */
8004                 rc = 0;
8005         } else if (rc < 0)
8006                 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8007
8008         return rc;
8009 }
8010
8011 int bnx2x_del_all_macs(struct bnx2x *bp,
8012                        struct bnx2x_vlan_mac_obj *mac_obj,
8013                        int mac_type, bool wait_for_comp)
8014 {
8015         int rc;
8016         unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8017
8018         /* Wait for completion of the requested commands */
8019         if (wait_for_comp)
8020                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8021
8022         /* Set the mac type of addresses we want to clear */
8023         __set_bit(mac_type, &vlan_mac_flags);
8024
8025         rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8026         if (rc < 0)
8027                 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8028
8029         return rc;
8030 }
8031
8032 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8033 {
8034         if (is_zero_ether_addr(bp->dev->dev_addr) &&
8035             (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
8036                 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
8037                    "Ignoring Zero MAC for STORAGE SD mode\n");
8038                 return 0;
8039         }
8040
8041         if (IS_PF(bp)) {
8042                 unsigned long ramrod_flags = 0;
8043
8044                 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8045                 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8046                 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8047                                          &bp->sp_objs->mac_obj, set,
8048                                          BNX2X_ETH_MAC, &ramrod_flags);
8049         } else { /* vf */
8050                 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8051                                              bp->fp->index, true);
8052         }
8053 }
8054
8055 int bnx2x_setup_leading(struct bnx2x *bp)
8056 {
8057         return bnx2x_setup_queue(bp, &bp->fp[0], 1);
8058 }
8059
8060 /**
8061  * bnx2x_set_int_mode - configure interrupt mode
8062  *
8063  * @bp:         driver handle
8064  *
8065  * In case of MSI-X it will also try to enable MSI-X.
8066  */
8067 int bnx2x_set_int_mode(struct bnx2x *bp)
8068 {
8069         int rc = 0;
8070
8071         if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
8072                 return -EINVAL;
8073
8074         switch (int_mode) {
8075         case BNX2X_INT_MODE_MSIX:
8076                 /* attempt to enable msix */
8077                 rc = bnx2x_enable_msix(bp);
8078
8079                 /* msix attained */
8080                 if (!rc)
8081                         return 0;
8082
8083                 /* vfs use only msix */
8084                 if (rc && IS_VF(bp))
8085                         return rc;
8086
8087                 /* failed to enable multiple MSI-X */
8088                 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8089                                bp->num_queues,
8090                                1 + bp->num_cnic_queues);
8091
8092                 /* falling through... */
8093         case BNX2X_INT_MODE_MSI:
8094                 bnx2x_enable_msi(bp);
8095
8096                 /* falling through... */
8097         case BNX2X_INT_MODE_INTX:
8098                 bp->num_ethernet_queues = 1;
8099                 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8100                 BNX2X_DEV_INFO("set number of queues to 1\n");
8101                 break;
8102         default:
8103                 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8104                 return -EINVAL;
8105         }
8106         return 0;
8107 }
8108
8109 /* must be called prior to any HW initializations */
8110 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8111 {
8112         if (IS_SRIOV(bp))
8113                 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8114         return L2_ILT_LINES(bp);
8115 }
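/* Worked example (illustrative values, not the real constants): with
 * BNX2X_FIRST_VF_CID = 256, BNX2X_VF_CIDS = 1024 and ILT_PAGE_CIDS = 128,
 * the SR-IOV case above reserves (256 + 1024) / 128 = 10 CDU ILT lines.
 */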
8116
8117 void bnx2x_ilt_set_info(struct bnx2x *bp)
8118 {
8119         struct ilt_client_info *ilt_client;
8120         struct bnx2x_ilt *ilt = BP_ILT(bp);
8121         u16 line = 0;
8122
8123         ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8124         DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8125
8126         /* CDU */
8127         ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8128         ilt_client->client_num = ILT_CLIENT_CDU;
8129         ilt_client->page_size = CDU_ILT_PAGE_SZ;
8130         ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8131         ilt_client->start = line;
8132         line += bnx2x_cid_ilt_lines(bp);
8133
8134         if (CNIC_SUPPORT(bp))
8135                 line += CNIC_ILT_LINES;
8136         ilt_client->end = line - 1;
8137
8138         DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8139            ilt_client->start,
8140            ilt_client->end,
8141            ilt_client->page_size,
8142            ilt_client->flags,
8143            ilog2(ilt_client->page_size >> 12));
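        /* The "hw psz" printed above encodes the page size as a power of two
         * in 4KB units: e.g. a 32KB page gives ilog2(32768 >> 12) =
         * ilog2(8) = 3.
         */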
8144
8145         /* QM */
8146         if (QM_INIT(bp->qm_cid_count)) {
8147                 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8148                 ilt_client->client_num = ILT_CLIENT_QM;
8149                 ilt_client->page_size = QM_ILT_PAGE_SZ;
8150                 ilt_client->flags = 0;
8151                 ilt_client->start = line;
8152
8153                 /* 4 bytes for each cid */
8154                 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8155                                                          QM_ILT_PAGE_SZ);
8156
8157                 ilt_client->end = line - 1;
8158
8159                 DP(NETIF_MSG_IFUP,
8160                    "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8161                    ilt_client->start,
8162                    ilt_client->end,
8163                    ilt_client->page_size,
8164                    ilt_client->flags,
8165                    ilog2(ilt_client->page_size >> 12));
8166         }
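        /* Illustrative QM sizing (hypothetical values): with qm_cid_count =
         * 1024, QM_QUEUES_PER_FUNC = 16 and QM_ILT_PAGE_SZ = 4KB, the QM
         * client would need DIV_ROUND_UP(1024 * 16 * 4, 4096) = 16 ILT lines.
         */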
8167
8168         if (CNIC_SUPPORT(bp)) {
8169                 /* SRC */
8170                 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8171                 ilt_client->client_num = ILT_CLIENT_SRC;
8172                 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8173                 ilt_client->flags = 0;
8174                 ilt_client->start = line;
8175                 line += SRC_ILT_LINES;
8176                 ilt_client->end = line - 1;
8177
8178                 DP(NETIF_MSG_IFUP,
8179                    "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8180                    ilt_client->start,
8181                    ilt_client->end,
8182                    ilt_client->page_size,
8183                    ilt_client->flags,
8184                    ilog2(ilt_client->page_size >> 12));
8185
8186                 /* TM */
8187                 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8188                 ilt_client->client_num = ILT_CLIENT_TM;
8189                 ilt_client->page_size = TM_ILT_PAGE_SZ;
8190                 ilt_client->flags = 0;
8191                 ilt_client->start = line;
8192                 line += TM_ILT_LINES;
8193                 ilt_client->end = line - 1;
8194
8195                 DP(NETIF_MSG_IFUP,
8196                    "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8197                    ilt_client->start,
8198                    ilt_client->end,
8199                    ilt_client->page_size,
8200                    ilt_client->flags,
8201                    ilog2(ilt_client->page_size >> 12));
8202         }
8203
8204         BUG_ON(line > ILT_MAX_LINES);
8205 }
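/* Net effect (summary): clients are laid out back to back starting at
 * ilt->start_line - CDU first, then optionally QM, and with CNIC support SRC
 * and TM - and each client's [start, end] line range is recorded for the ILT
 * init code.
 */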
8206
8207 /**
8208  * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8209  *
8210  * @bp:                 driver handle
8211  * @fp:                 pointer to fastpath
8212  * @init_params:        pointer to parameters structure
8213  *
8214  * parameters configured:
8215  *      - HC configuration
8216  *      - Queue's CDU context
8217  */
8218 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8219         struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8220 {
8221         u8 cos;
8222         int cxt_index, cxt_offset;
8223
8224         /* FCoE Queue uses Default SB, thus has no HC capabilities */
8225         if (!IS_FCOE_FP(fp)) {
8226                 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8227                 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8228
8229                 /* If HC is supported, enable host coalescing in the transition
8230                  * to INIT state.
8231                  */
8232                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8233                 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8234
8235                 /* HC rate */
8236                 init_params->rx.hc_rate = bp->rx_ticks ?
8237                         (1000000 / bp->rx_ticks) : 0;
8238                 init_params->tx.hc_rate = bp->tx_ticks ?
8239                         (1000000 / bp->tx_ticks) : 0;
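                /* e.g. rx_ticks = 25 (usec) gives hc_rate = 1000000 / 25 =
                 * 40000 coalescing events per second; zero ticks leaves the
                 * rate at 0.
                 */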
8240
8241                 /* FW SB ID */
8242                 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8243                         fp->fw_sb_id;
8244
8245                 /*
8246          * CQ index among the SB indices: the FCoE client uses the default
8247                  * SB, therefore it's different.
8248                  */
8249                 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8250                 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8251         }
8252
8253         /* set maximum number of COSs supported by this queue */
8254         init_params->max_cos = fp->max_cos;
8255
8256         DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8257             fp->index, init_params->max_cos);
8258
8259         /* set the context pointers queue object */
8260         for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8261                 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8262                 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8263                                 ILT_PAGE_CIDS);
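                /* plain div/mod: e.g. with ILT_PAGE_CIDS = 128 (an
                 * illustrative value), cid 300 maps to context page 2,
                 * offset 44.
                 */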
8264                 init_params->cxts[cos] =
8265                         &bp->context[cxt_index].vcxt[cxt_offset].eth;
8266         }
8267 }
8268
8269 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8270                         struct bnx2x_queue_state_params *q_params,
8271                         struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8272                         int tx_index, bool leading)
8273 {
8274         memset(tx_only_params, 0, sizeof(*tx_only_params));
8275
8276         /* Set the command */
8277         q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8278
8279         /* Set tx-only QUEUE flags: don't zero statistics */
8280         tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8281
8282         /* choose the index of the cid to send the slow path on */
8283         tx_only_params->cid_index = tx_index;
8284
8285         /* Set general TX_ONLY_SETUP parameters */
8286         bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8287
8288         /* Set Tx TX_ONLY_SETUP parameters */
8289         bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8290
8291         DP(NETIF_MSG_IFUP,
8292            "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8293            tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8294            q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8295            tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8296
8297         /* send the ramrod */
8298         return bnx2x_queue_state_change(bp, q_params);
8299 }
8300
8301 /**
8302  * bnx2x_setup_queue - setup queue
8303  *
8304  * @bp:         driver handle
8305  * @fp:         pointer to fastpath
8306  * @leading:    is leading
8307  *
8308  * This function performs two transitions in the Queue state machine:
8309  *      1) RESET->INIT  2) INIT->SETUP
8310  */
8311
8312 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8313                        bool leading)
8314 {
8315         struct bnx2x_queue_state_params q_params = {NULL};
8316         struct bnx2x_queue_setup_params *setup_params =
8317                                                 &q_params.params.setup;
8318         struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8319                                                 &q_params.params.tx_only;
8320         int rc;
8321         u8 tx_index;
8322
8323         DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8324
8325         /* reset IGU state; skip for the FCoE L2 queue */
8326         if (!IS_FCOE_FP(fp))
8327                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8328                              IGU_INT_ENABLE, 0);
8329
8330         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8331         /* We want to wait for completion in this context */
8332         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8333
8334         /* Prepare the INIT parameters */
8335         bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8336
8337         /* Set the command */
8338         q_params.cmd = BNX2X_Q_CMD_INIT;
8339
8340         /* Change the state to INIT */
8341         rc = bnx2x_queue_state_change(bp, &q_params);
8342         if (rc) {
8343                 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8344                 return rc;
8345         }
8346
8347         DP(NETIF_MSG_IFUP, "init complete\n");
8348
8349         /* Now move the Queue to the SETUP state... */
8350         memset(setup_params, 0, sizeof(*setup_params));
8351
8352         /* Set QUEUE flags */
8353         setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8354
8355         /* Set general SETUP parameters */
8356         bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8357                                 FIRST_TX_COS_INDEX);
8358
8359         bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8360                             &setup_params->rxq_params);
8361
8362         bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8363                            FIRST_TX_COS_INDEX);
8364
8365         /* Set the command */
8366         q_params.cmd = BNX2X_Q_CMD_SETUP;
8367
8368         if (IS_FCOE_FP(fp))
8369                 bp->fcoe_init = true;
8370
8371         /* Change the state to SETUP */
8372         rc = bnx2x_queue_state_change(bp, &q_params);
8373         if (rc) {
8374                 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8375                 return rc;
8376         }
8377
8378         /* loop through the relevant tx-only indices */
8379         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8380               tx_index < fp->max_cos;
8381               tx_index++) {
8382
8383                 /* prepare and send tx-only ramrod */
8384                 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8385                                           tx_only_params, tx_index, leading);
8386                 if (rc) {
8387                         BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8388                                   fp->index, tx_index);
8389                         return rc;
8390                 }
8391         }
8392
8393         return rc;
8394 }
8395
8396 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8397 {
8398         struct bnx2x_fastpath *fp = &bp->fp[index];
8399         struct bnx2x_fp_txdata *txdata;
8400         struct bnx2x_queue_state_params q_params = {NULL};
8401         int rc, tx_index;
8402
8403         DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8404
8405         q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8406         /* We want to wait for completion in this context */
8407         __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8408
8409         /* close tx-only connections */
8410         for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8411              tx_index < fp->max_cos;
8412              tx_index++) {
8413
8414                 /* ascertain this is a normal queue */
8415                 txdata = fp->txdata_ptr[tx_index];
8416
8417                 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8418                                                         txdata->txq_index);
8419
8420                 /* send halt terminate on tx-only connection */
8421                 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8422                 memset(&q_params.params.terminate, 0,
8423                        sizeof(q_params.params.terminate));
8424                 q_params.params.terminate.cid_index = tx_index;
8425
8426                 rc = bnx2x_queue_state_change(bp, &q_params);
8427                 if (rc)
8428                         return rc;
8429
8430                 /* send cfc del ramrod on tx-only connection */
8431                 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8432                 memset(&q_params.params.cfc_del, 0,
8433                        sizeof(q_params.params.cfc_del));
8434                 q_params.params.cfc_del.cid_index = tx_index;
8435                 rc = bnx2x_queue_state_change(bp, &q_params);
8436                 if (rc)
8437                         return rc;
8438         }
8439         /* Stop the primary connection: */
8440         /* ...halt the connection */
8441         q_params.cmd = BNX2X_Q_CMD_HALT;
8442         rc = bnx2x_queue_state_change(bp, &q_params);
8443         if (rc)
8444                 return rc;
8445
8446         /* ...terminate the connection */
8447         q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8448         memset(&q_params.params.terminate, 0,
8449                sizeof(q_params.params.terminate));
8450         q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8451         rc = bnx2x_queue_state_change(bp, &q_params);
8452         if (rc)
8453                 return rc;
8454         /* ...delete cfc entry */
8455         q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8456         memset(&q_params.params.cfc_del, 0,
8457                sizeof(q_params.params.cfc_del));
8458         q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8459         return bnx2x_queue_state_change(bp, &q_params);
8460 }
8461
8462 static void bnx2x_reset_func(struct bnx2x *bp)
8463 {
8464         int port = BP_PORT(bp);
8465         int func = BP_FUNC(bp);
8466         int i;
8467
8468         /* Disable the function in the FW */
8469         REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8470         REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8471         REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8472         REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8473
8474         /* FP SBs */
8475         for_each_eth_queue(bp, i) {
8476                 struct bnx2x_fastpath *fp = &bp->fp[i];
8477                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8478                            CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8479                            SB_DISABLED);
8480         }
8481
8482         if (CNIC_LOADED(bp))
8483                 /* CNIC SB */
8484                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8485                         CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8486                         (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8487
8488         /* SP SB */
8489         REG_WR8(bp, BAR_CSTRORM_INTMEM +
8490                 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8491                 SB_DISABLED);
8492
8493         for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++) /* zero SPQ data */
8494                 REG_WR(bp, BAR_XSTRORM_INTMEM +
8495                        XSTORM_SPQ_DATA_OFFSET(func) + i * 4, 0);
8496
8497         /* Configure IGU */
8498         if (bp->common.int_block == INT_BLOCK_HC) {
8499                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8500                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8501         } else {
8502                 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8503                 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8504         }
8505
8506         if (CNIC_LOADED(bp)) {
8507                 /* Disable Timer scan */
8508                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8509                 /*
8510                  * Wait for at least 10ms and up to 2 seconds for the timers
8511                  * scan to complete
8512                  */
8513                 for (i = 0; i < 200; i++) {
8514                         usleep_range(10000, 20000);
8515                         if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8516                                 break;
8517                 }
8518         }
8519         /* Clear ILT */
8520         bnx2x_clear_func_ilt(bp, func);
8521
8522         /* Timers workaround bug for E2: if this is vnic-3,
8523          * we need to set the entire ILT range for the timers client.
8524          */
8525         if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
8526                 struct ilt_client_info ilt_cli;
8527                 /* use dummy TM client */
8528                 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8529                 ilt_cli.start = 0;
8530                 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8531                 ilt_cli.client_num = ILT_CLIENT_TM;
8532
8533                 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8534         }
8535
8536         /* this assumes that reset_port() was called before reset_func() */
8537         if (!CHIP_IS_E1x(bp))
8538                 bnx2x_pf_disable(bp);
8539
8540         bp->dmae_ready = 0;
8541 }
8542
8543 static void bnx2x_reset_port(struct bnx2x *bp)
8544 {
8545         int port = BP_PORT(bp);
8546         u32 val;
8547
8548         /* Reset physical Link */
8549         bnx2x__link_reset(bp);
8550
8551         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8552
8553         /* Do not rcv packets to BRB */
8554         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8555         /* Do not direct rcv packets that are not for MCP to the BRB */
8556         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8557                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8558
8559         /* Configure AEU */
8560         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8561
8562         msleep(100);
8563         /* Check for BRB port occupancy */
8564         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8565         if (val)
8566                 DP(NETIF_MSG_IFDOWN,
8567                    "BRB1 is not empty, %d blocks are occupied\n", val);
8568
8569         /* TODO: Close Doorbell port? */
8570 }
8571
8572 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
8573 {
8574         struct bnx2x_func_state_params func_params = {NULL};
8575
8576         /* Prepare parameters for function state transitions */
8577         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8578
8579         func_params.f_obj = &bp->func_obj;
8580         func_params.cmd = BNX2X_F_CMD_HW_RESET;
8581
8582         func_params.params.hw_init.load_phase = load_code;
8583
8584         return bnx2x_func_state_change(bp, &func_params);
8585 }
8586
8587 static int bnx2x_func_stop(struct bnx2x *bp)
8588 {
8589         struct bnx2x_func_state_params func_params = {NULL};
8590         int rc;
8591
8592         /* Prepare parameters for function state transitions */
8593         __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8594         func_params.f_obj = &bp->func_obj;
8595         func_params.cmd = BNX2X_F_CMD_STOP;
8596
8597         /*
8598          * Try to stop the function the 'good way'. If it fails (in case
8599          * of a parity error during bnx2x_chip_cleanup()) and we are
8600          * not in debug mode, perform a dry state transaction in order to
8601          * enable a further HW_RESET transaction.
8602          */
8603         rc = bnx2x_func_state_change(bp, &func_params);
8604         if (rc) {
8605 #ifdef BNX2X_STOP_ON_ERROR
8606                 return rc;
8607 #else
8608                 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
8609                 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8610                 return bnx2x_func_state_change(bp, &func_params);
8611 #endif
8612         }
8613
8614         return 0;
8615 }
8616
8617 /**
8618  * bnx2x_send_unload_req - request unload mode from the MCP.
8619  *
8620  * @bp:                 driver handle
8621  * @unload_mode:        requested function's unload mode
8622  *
8623  * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
8624  */
8625 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8626 {
8627         u32 reset_code = 0;
8628         int port = BP_PORT(bp);
8629
8630         /* Select the UNLOAD request mode */
8631         if (unload_mode == UNLOAD_NORMAL)
8632                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8633
8634         else if (bp->flags & NO_WOL_FLAG)
8635                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8636
8637         else if (bp->wol) {
8638                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8639                 u8 *mac_addr = bp->dev->dev_addr;
8640                 u32 val;
8641                 u16 pmc;
8642
8643                 /* The mac address is written to entries 1-4 to
8644                  * preserve entry 0 which is used by the PMF
8645                  */
8646                 u8 entry = (BP_VN(bp) + 1)*8;
8647
8648                 val = (mac_addr[0] << 8) | mac_addr[1];
8649                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8650
8651                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8652                       (mac_addr[4] << 8) | mac_addr[5];
8653                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
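                /* e.g. for MAC aa:bb:cc:dd:ee:ff the two writes above store
                 * 0x0000aabb at MAC_MATCH + entry and 0xccddeeff at
                 * MAC_MATCH + entry + 4 (vn 0 uses entry 8, vn 1 entry 16,
                 * and so on).
                 */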
8654
8655                 /* Enable the PME and clear the status */
8656                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
8657                 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8658                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
8659
8660                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8661
8662         } else
8663                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8664
8665         /* Send the request to the MCP */
8666         if (!BP_NOMCP(bp))
8667                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8668         else {
8669                 int path = BP_PATH(bp);
8670
8671                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      %d, %d, %d\n",
8672                    path, load_count[path][0], load_count[path][1],
8673                    load_count[path][2]);
8674                 load_count[path][0]--;
8675                 load_count[path][1 + port]--;
8676                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  %d, %d, %d\n",
8677                    path, load_count[path][0], load_count[path][1],
8678                    load_count[path][2]);
8679                 if (load_count[path][0] == 0)
8680                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8681                 else if (load_count[path][1 + port] == 0)
8682                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8683                 else
8684                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8685         }
8686
8687         return reset_code;
8688 }
8689
8690 /**
8691  * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8692  *
8693  * @bp:         driver handle
8694  * @keep_link:          true iff link should be kept up
8695  */
8696 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
8697 {
8698         u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8699
8700         /* Report UNLOAD_DONE to MCP */
8701         if (!BP_NOMCP(bp))
8702                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
8703 }
8704
8705 static int bnx2x_func_wait_started(struct bnx2x *bp)
8706 {
8707         int tout = 50;
8708         int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8709
8710         if (!bp->port.pmf)
8711                 return 0;
8712
8713         /*
8714          * (assumption: No Attention from MCP at this stage)
8715          * The PMF is probably in the middle of a TX disable/enable transaction.
8716          * 1. Sync the ISR for the default SB
8717          * 2. Sync the SP queue - this guarantees that attention handling has started
8718          * 3. Wait until the TX disable/enable transaction completes
8719          *
8720          * Steps 1+2 guarantee that if a DCBx attention was scheduled, it has
8721          * already changed the pending bit of the transaction from STARTED to
8722          * TX_STOPPED; if we have already received a completion for the
8723          * transaction, the state is TX_STOPPED. The state will return to
8724          * STARTED after the TX_STOPPED-->STARTED transaction completes.
8725          */
8726
8727         /* make sure default SB ISR is done */
8728         if (msix)
8729                 synchronize_irq(bp->msix_table[0].vector);
8730         else
8731                 synchronize_irq(bp->pdev->irq);
8732
8733         flush_workqueue(bnx2x_wq);
8734
8735         while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8736                                 BNX2X_F_STATE_STARTED && tout--)
8737                 msleep(20);
8738
8739         if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8740                                                 BNX2X_F_STATE_STARTED) {
8741 #ifdef BNX2X_STOP_ON_ERROR
8742                 BNX2X_ERR("Wrong function state\n");
8743                 return -EBUSY;
8744 #else
8745                 /*
8746                  * Failed to complete the transaction in a "good way"
8747                  * Force both transactions with CLR bit
8748                  */
8749                 struct bnx2x_func_state_params func_params = {NULL};
8750
8751                 DP(NETIF_MSG_IFDOWN,
8752                    "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
8753
8754                 func_params.f_obj = &bp->func_obj;
8755                 __set_bit(RAMROD_DRV_CLR_ONLY,
8756                                         &func_params.ramrod_flags);
8757
8758                 /* STARTED-->TX_STOPPED */
8759                 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8760                 bnx2x_func_state_change(bp, &func_params);
8761
8762                 /* TX_STOPPED-->STARTED */
8763                 func_params.cmd = BNX2X_F_CMD_TX_START;
8764                 return bnx2x_func_state_change(bp, &func_params);
8765 #endif
8766         }
8767
8768         return 0;
8769 }
8770
8771 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
8772 {
8773         int port = BP_PORT(bp);
8774         int i, rc = 0;
8775         u8 cos;
8776         struct bnx2x_mcast_ramrod_params rparam = {NULL};
8777         u32 reset_code;
8778
8779         /* Wait until tx fastpath tasks complete */
8780         for_each_tx_queue(bp, i) {
8781                 struct bnx2x_fastpath *fp = &bp->fp[i];
8782
8783                 for_each_cos_in_tx_queue(fp, cos)
8784                         rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
8785 #ifdef BNX2X_STOP_ON_ERROR
8786                 if (rc)
8787                         return;
8788 #endif
8789         }
8790
8791         /* Give HW time to discard old tx messages */
8792         usleep_range(1000, 2000);
8793
8794         /* Clean all ETH MACs */
8795         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8796                                 false);
8797         if (rc < 0)
8798                 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8799
8800         /* Clean up UC list  */
8801         rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
8802                                 true);
8803         if (rc < 0)
8804                 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8805                           rc);
8806
8807         /* Disable LLH */
8808         if (!CHIP_IS_E1(bp))
8809                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8810
8811         /* Set "drop all" (stop Rx).
8812          * We need to take a netif_addr_lock() here in order to prevent
8813          * a race between the completion code and this code.
8814          */
8815         netif_addr_lock_bh(bp->dev);
8816         /* Schedule the rx_mode command */
8817         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8818                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8819         else
8820                 bnx2x_set_storm_rx_mode(bp);
8821
8822         /* Cleanup multicast configuration */
8823         rparam.mcast_obj = &bp->mcast_obj;
8824         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8825         if (rc < 0)
8826                 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8827
8828         netif_addr_unlock_bh(bp->dev);
8829
8830         bnx2x_iov_chip_cleanup(bp);
8831
8832         /*
8833          * Send the UNLOAD_REQUEST to the MCP. This will return whether
8834          * this function should perform FUNC, PORT or COMMON HW
8835          * reset.
8836          */
8837         reset_code = bnx2x_send_unload_req(bp, unload_mode);
8838
8839         /*
8840          * (assumption: No Attention from MCP at this stage)
8841          * The PMF is probably in the middle of a TX disable/enable transaction.
8842          */
8843         rc = bnx2x_func_wait_started(bp);
8844         if (rc) {
8845                 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8846 #ifdef BNX2X_STOP_ON_ERROR
8847                 return;
8848 #endif
8849         }
8850
8851         /* Close multi and leading connections
8852          * Completions for ramrods are collected in a synchronous way
8853          */
8854         for_each_eth_queue(bp, i)
8855                 if (bnx2x_stop_queue(bp, i))
8856 #ifdef BNX2X_STOP_ON_ERROR
8857                         return;
8858 #else
8859                         goto unload_error;
8860 #endif
8861
8862         if (CNIC_LOADED(bp)) {
8863                 for_each_cnic_queue(bp, i)
8864                         if (bnx2x_stop_queue(bp, i))
8865 #ifdef BNX2X_STOP_ON_ERROR
8866                                 return;
8867 #else
8868                                 goto unload_error;
8869 #endif
8870         }
8871
8872         /* If SP settings didn't get completed so far - something
8873          * very wrong has happened.
8874          */
8875         if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8876                 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
8877
8878 #ifndef BNX2X_STOP_ON_ERROR
8879 unload_error:
8880 #endif
8881         rc = bnx2x_func_stop(bp);
8882         if (rc) {
8883                 BNX2X_ERR("Function stop failed!\n");
8884 #ifdef BNX2X_STOP_ON_ERROR
8885                 return;
8886 #endif
8887         }
8888
8889         /* Disable HW interrupts, NAPI */
8890         bnx2x_netif_stop(bp, 1);
8891         /* Delete all NAPI objects */
8892         bnx2x_del_all_napi(bp);
8893         if (CNIC_LOADED(bp))
8894                 bnx2x_del_all_napi_cnic(bp);
8895
8896         /* Release IRQs */
8897         bnx2x_free_irq(bp);
8898
8899         /* Reset the chip */
8900         rc = bnx2x_reset_hw(bp, reset_code);
8901         if (rc)
8902                 BNX2X_ERR("HW_RESET failed\n");
8903
8904         /* Report UNLOAD_DONE to MCP */
8905         bnx2x_send_unload_done(bp, keep_link);
8906 }
8907
8908 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8909 {
8910         u32 val;
8911
8912         DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
8913
8914         if (CHIP_IS_E1(bp)) {
8915                 int port = BP_PORT(bp);
8916                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8917                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8918
8919                 val = REG_RD(bp, addr);
8920                 val &= ~(0x300);
8921                 REG_WR(bp, addr, val);
8922         } else {
8923                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8924                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8925                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8926                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8927         }
8928 }
8929
8930 /* Close gates #2, #3 and #4: */
8931 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8932 {
8933         u32 val;
8934
8935         /* Gates #2 and #4a are closed/opened for "not E1" only */
8936         if (!CHIP_IS_E1(bp)) {
8937                 /* #4 */
8938                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
8939                 /* #2 */
8940                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
8941         }
8942
8943         /* #3 */
8944         if (CHIP_IS_E1x(bp)) {
8945                 /* Prevent interrupts from HC on both ports */
8946                 val = REG_RD(bp, HC_REG_CONFIG_1);
8947                 REG_WR(bp, HC_REG_CONFIG_1,
8948                        (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
8949                        (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
8950
8951                 val = REG_RD(bp, HC_REG_CONFIG_0);
8952                 REG_WR(bp, HC_REG_CONFIG_0,
8953                        (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
8954                        (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
8955         } else {
8956                 /* Prevent incoming interrupts in IGU */
8957                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8958
8959                 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
8960                        (!close) ?
8961                        (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
8962                        (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
8963         }
8964
8965         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
8966                 close ? "closing" : "opening");
8967         mmiowb();
8968 }
8969
8970 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8971
8972 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8973 {
8974         /* Do some magic... */
8975         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8976         *magic_val = val & SHARED_MF_CLP_MAGIC;
8977         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8978 }
8979
8980 /**
8981  * bnx2x_clp_reset_done - restore the value of the `magic' bit.
8982  *
8983  * @bp:         driver handle
8984  * @magic_val:  old value of the `magic' bit.
8985  */
8986 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8987 {
8988         /* Restore the `magic' bit value... */
8989         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8990         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8991                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8992 }
8993
8994 /**
8995  * bnx2x_reset_mcp_prep - prepare for MCP reset.
8996  *
8997  * @bp:         driver handle
8998  * @magic_val:  old value of 'magic' bit.
8999  *
9000  * Takes care of CLP configurations.
9001  */
9002 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9003 {
9004         u32 shmem;
9005         u32 validity_offset;
9006
9007         DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9008
9009         /* Set `magic' bit in order to save MF config */
9010         if (!CHIP_IS_E1(bp))
9011                 bnx2x_clp_reset_prep(bp, magic_val);
9012
9013         /* Get shmem offset */
9014         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9015         validity_offset =
9016                 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9017
9018         /* Clear validity map flags */
9019         if (shmem > 0)
9020                 REG_WR(bp, shmem + validity_offset, 0);
9021 }
9022
9023 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
9024 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
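/* i.e. the shmem polling loop in bnx2x_init_shmem() below retries up to
 * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 50 times, with each individual wait
 * stretched 10x on emulation/FPGA (see bnx2x_mcp_wait_one()).
 */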
9025
9026 /**
9027  * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
9028  *
9029  * @bp: driver handle
9030  */
9031 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9032 {
9033         /* special handling for emulation and FPGA: wait 10 times longer */
9035         if (CHIP_REV_IS_SLOW(bp))
9036                 msleep(MCP_ONE_TIMEOUT*10);
9037         else
9038                 msleep(MCP_ONE_TIMEOUT);
9039 }
9040
9041 /*
9042  * initializes bp->common.shmem_base and waits for validity signature to appear
9043  */
9044 static int bnx2x_init_shmem(struct bnx2x *bp)
9045 {
9046         int cnt = 0;
9047         u32 val = 0;
9048
9049         do {
9050                 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9051                 if (bp->common.shmem_base) {
9052                         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9053                         if (val & SHR_MEM_VALIDITY_MB)
9054                                 return 0;
9055                 }
9056
9057                 bnx2x_mcp_wait_one(bp);
9058
9059         } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9060
9061         BNX2X_ERR("BAD MCP validity signature\n");
9062
9063         return -ENODEV;
9064 }
9065
9066 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9067 {
9068         int rc = bnx2x_init_shmem(bp);
9069
9070         /* Restore the `magic' bit value */
9071         if (!CHIP_IS_E1(bp))
9072                 bnx2x_clp_reset_done(bp, magic_val);
9073
9074         return rc;
9075 }
9076
9077 static void bnx2x_pxp_prep(struct bnx2x *bp)
9078 {
9079         if (!CHIP_IS_E1(bp)) {
9080                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9081                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9082                 mmiowb();
9083         }
9084 }
9085
9086 /*
9087  * Reset the whole chip except for:
9088  *      - PCIE core
9089  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9090  *              one reset bit)
9091  *      - IGU
9092  *      - MISC (including AEU)
9093  *      - GRC
9094  *      - RBCN, RBCP
9095  */
9096 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9097 {
9098         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9099         u32 global_bits2, stay_reset2;
9100
9101         /*
9102          * Bits that have to be set in reset_mask2 if we want to reset 'global'
9103          * (per chip) blocks.
9104          */
9105         global_bits2 =
9106                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9107                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9108
9109         /* Don't reset the following blocks.
9110          * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9111          *            reset, as in a 4-port device they might still be owned
9112          *            by the MCP (there is only one leader per path).
9113          */
9114         not_reset_mask1 =
9115                 MISC_REGISTERS_RESET_REG_1_RST_HC |
9116                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9117                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9118
9119         not_reset_mask2 =
9120                 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9121                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9122                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9123                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9124                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9125                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
9126                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9127                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9128                 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9129                 MISC_REGISTERS_RESET_REG_2_PGLC |
9130                 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9131                 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9132                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9133                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9134                 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9135                 MISC_REGISTERS_RESET_REG_2_UMAC1;
9136
9137         /*
9138          * Keep the following blocks in reset:
9139          *  - all xxMACs are handled by the bnx2x_link code.
9140          */
9141         stay_reset2 =
9142                 MISC_REGISTERS_RESET_REG_2_XMAC |
9143                 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9144
9145         /* Full reset masks according to the chip */
9146         reset_mask1 = 0xffffffff;
9147
9148         if (CHIP_IS_E1(bp))
9149                 reset_mask2 = 0xffff;
9150         else if (CHIP_IS_E1H(bp))
9151                 reset_mask2 = 0x1ffff;
9152         else if (CHIP_IS_E2(bp))
9153                 reset_mask2 = 0xfffff;
9154         else /* CHIP_IS_E3 */
9155                 reset_mask2 = 0x3ffffff;
9156
9157         /* Don't reset global blocks unless we need to */
9158         if (!global)
9159                 reset_mask2 &= ~global_bits2;
9160
9161         /*
9162          * In case of attention in the QM, we need to reset PXP
9163          * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9164          * because otherwise QM reset would release 'close the gates' shortly
9165          * before resetting the PXP, then the PSWRQ would send a write
9166          * request to PGLUE. Then when PXP is reset, PGLUE would try to
9167          * read the payload data from PSWWR, but PSWWR would not
9168          * respond. The write queue in PGLUE would stuck, dmae commands
9169          * respond. The write queue in PGLUE would get stuck, and DMAE
9170          * commands would not return. Therefore it's important to reset the second
9171          * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9172          * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9173          * bit).
9174          */
9175         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9176                reset_mask2 & (~not_reset_mask2));
9177
9178         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9179                reset_mask1 & (~not_reset_mask1));
9180
9181         barrier();
9182         mmiowb();
9183
9184         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9185                reset_mask2 & (~stay_reset2));
9186
9187         barrier();
9188         mmiowb();
9189
9190         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9191         mmiowb();
9192 }
9193
9194 /**
9195  * bnx2x_er_poll_igu_vq - poll for the pending writes bit to clear.
9196  *
9197  * @bp: driver handle
9198  *
9199  * The bit should get cleared in no more than 1s. Returns 0 if the
9200  * pending writes bit gets cleared.
9202  */
9203 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9204 {
9205         u32 cnt = 1000;
9206         u32 pend_bits = 0;
9207
9208         do {
9209                 pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9210
9211                 if (pend_bits == 0)
9212                         break;
9213
9214                 usleep_range(1000, 2000);
9215         } while (cnt-- > 0);
9216
9217         if (pend_bits) {        /* the loop above timed out */
9218                 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9219                           pend_bits);
9220                 return -EBUSY;
9221         }
9222
9223         return 0;
9224 }
9225
9226 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9227 {
9228         int cnt = 1000;
9229         u32 val = 0;
9230         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9231         u32 tags_63_32 = 0;
9232
9233         /* Empty the Tetris buffer, wait for 1s */
9234         do {
9235                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9236                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9237                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9238                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9239                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9240                 if (CHIP_IS_E3(bp))
9241                         tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9242
9243                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9244                     ((port_is_idle_0 & 0x1) == 0x1) &&
9245                     ((port_is_idle_1 & 0x1) == 0x1) &&
9246                     (pgl_exp_rom2 == 0xffffffff) &&
9247                     (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9248                         break;
9249                 usleep_range(1000, 2000);
9250         } while (cnt-- > 0);
9251
9252         if (cnt <= 0) {
9253                 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9254                 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9255                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9256                           pgl_exp_rom2);
9257                 return -EAGAIN;
9258         }
9259
9260         barrier();
9261
9262         /* Close gates #2, #3 and #4 */
9263         bnx2x_set_234_gates(bp, true);
9264
9265         /* Poll for IGU VQs for 57712 and newer chips */
9266         if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9267                 return -EAGAIN;
9268
9269         /* TBD: Indicate that "process kill" is in progress to MCP */
9270
9271         /* Clear "unprepared" bit */
9272         REG_WR(bp, MISC_REG_UNPREPARED, 0);
9273         barrier();
9274
9275         /* Make sure all is written to the chip before the reset */
9276         mmiowb();
9277
9278         /* Wait for 1ms to empty GLUE and PCI-E core queues,
9279          * PSWHST, GRC and PSWRD Tetris buffer.
9280          */
9281         usleep_range(1000, 2000);
9282
9283         /* Prepare for chip reset: */
9284         /* MCP */
9285         if (global)
9286                 bnx2x_reset_mcp_prep(bp, &val);
9287
9288         /* PXP */
9289         bnx2x_pxp_prep(bp);
9290         barrier();
9291
9292         /* reset the chip */
9293         bnx2x_process_kill_chip_reset(bp, global);
9294         barrier();
9295
9296         /* Recover after reset: */
9297         /* MCP */
9298         if (global && bnx2x_reset_mcp_comp(bp, val))
9299                 return -EAGAIN;
9300
9301         /* TBD: Add resetting the NO_MCP mode DB here */
9302
9303         /* Open the gates #2, #3 and #4 */
9304         bnx2x_set_234_gates(bp, false);
9305
9306         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
9307          * reset state, re-enable attentions. */
9308
9309         return 0;
9310 }
9311
9312 static int bnx2x_leader_reset(struct bnx2x *bp)
9313 {
9314         int rc = 0;
9315         bool global = bnx2x_reset_is_global(bp);
9316         u32 load_code;
9317
9318         /* if we are not going to reset the MCP, load a "fake" driver to
9319          * reset the HW while this driver is the owner of the HW
9320          */
9321         if (!global && !BP_NOMCP(bp)) {
9322                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9323                                              DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9324                 if (!load_code) {
9325                         BNX2X_ERR("MCP response failure, aborting\n");
9326                         rc = -EAGAIN;
9327                         goto exit_leader_reset;
9328                 }
9329                 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9330                     (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9331                         BNX2X_ERR("MCP unexpected resp, aborting\n");
9332                         rc = -EAGAIN;
9333                         goto exit_leader_reset2;
9334                 }
9335                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9336                 if (!load_code) {
9337                         BNX2X_ERR("MCP response failure, aborting\n");
9338                         rc = -EAGAIN;
9339                         goto exit_leader_reset2;
9340                 }
9341         }
9342
9343         /* Try to recover after the failure */
9344         if (bnx2x_process_kill(bp, global)) {
9345                 BNX2X_ERR("Something bad has happened on engine %d! Aii!\n",
9346                           BP_PATH(bp));
9347                 rc = -EAGAIN;
9348                 goto exit_leader_reset2;
9349         }
9350
9351         /*
9352          * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9353          * state.
9354          */
9355         bnx2x_set_reset_done(bp);
9356         if (global)
9357                 bnx2x_clear_reset_global(bp);
9358
9359 exit_leader_reset2:
9360         /* unload "fake driver" if it was loaded */
9361         if (!global && !BP_NOMCP(bp)) {
9362                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9363                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9364         }
9365 exit_leader_reset:
9366         bp->is_leader = 0;
9367         bnx2x_release_leader_lock(bp);
9368         smp_mb();
9369         return rc;
9370 }
9371
9372 static void bnx2x_recovery_failed(struct bnx2x *bp)
9373 {
9374         netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9375
9376         /* Disconnect this device */
9377         netif_device_detach(bp->dev);
9378
9379         /*
9380          * Block ifup for all functions on this engine until "process kill"
9381          * or power cycle.
9382          */
9383         bnx2x_set_reset_in_progress(bp);
9384
9385         /* Shut down the power */
9386         bnx2x_set_power_state(bp, PCI_D3hot);
9387
9388         bp->recovery_state = BNX2X_RECOVERY_FAILED;
9389
9390         smp_mb();
9391 }
9392
9393 /*
9394  * Assumption: runs under rtnl lock. This together with the fact
9395  * that it's called only from bnx2x_sp_rtnl() ensures that it
9396  * will never be called when netif_running(bp->dev) is false.
9397  */
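/* Recovery state machine walked below: BNX2X_RECOVERY_INIT ->
 * BNX2X_RECOVERY_WAIT -> BNX2X_RECOVERY_NIC_LOADING -> BNX2X_RECOVERY_DONE,
 * with bnx2x_recovery_failed() as the bail-out on an unrecoverable error.
 */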
9398 static void bnx2x_parity_recover(struct bnx2x *bp)
9399 {
9400         bool global = false;
9401         u32 error_recovered, error_unrecovered;
9402         bool is_parity;
9403
9404         DP(NETIF_MSG_HW, "Handling parity\n");
9405         while (1) {
9406                 switch (bp->recovery_state) {
9407                 case BNX2X_RECOVERY_INIT:
9408                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
9409                         is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9410                         WARN_ON(!is_parity);
9411
9412                         /* Try to get a LEADER_LOCK HW lock */
9413                         if (bnx2x_trylock_leader_lock(bp)) {
9414                                 bnx2x_set_reset_in_progress(bp);
9415                                 /*
9416                                  * Check if there was a global
9417                                  * attention; if so, set the global
9418                                  * reset bit.
9419                                  */
9420
9421                                 if (global)
9422                                         bnx2x_set_reset_global(bp);
9423
9424                                 bp->is_leader = 1;
9425                         }
9426
9427                         /* Stop the driver */
9428                         /* If interface has been removed - break */
9429                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
9430                                 return;
9431
9432                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
9433
9434                         /* Ensure "is_leader", MCP command sequence and
9435                          * "recovery_state" update values are seen on other
9436                          * CPUs.
9437                          */
9438                         smp_mb();
9439                         break;
9440
9441                 case BNX2X_RECOVERY_WAIT:
9442                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9443                         if (bp->is_leader) {
9444                                 int other_engine = BP_PATH(bp) ? 0 : 1;
9445                                 bool other_load_status =
9446                                         bnx2x_get_load_status(bp, other_engine);
9447                                 bool load_status =
9448                                         bnx2x_get_load_status(bp, BP_PATH(bp));
9449                                 global = bnx2x_reset_is_global(bp);
9450
9451                                 /*
9452                                  * In case of a parity in a global block, let
9453                                  * the first leader that performs a
9454                                  * leader_reset() reset the global blocks in
9455                                  * order to clear global attentions. Otherwise
9456                                  * the gates will remain closed for that
9457                                  * engine.
9458                                  */
9459                                 if (load_status ||
9460                                     (global && other_load_status)) {
9461                                         /* Wait until all other functions get
9462                                          * down.
9463                                          */
9464                                         schedule_delayed_work(&bp->sp_rtnl_task,
9465                                                                 HZ/10);
9466                                         return;
9467                                 } else {
9468                                         /* If all other functions got down -
9469                                          * try to bring the chip back to
9470                                          * normal. In any case it's an exit
9471                                          * point for a leader.
9472                                          */
9473                                         if (bnx2x_leader_reset(bp)) {
9474                                                 bnx2x_recovery_failed(bp);
9475                                                 return;
9476                                         }
9477
9478                                         /* If we are here, it means the
9479                                          * leader has succeeded and doesn't
9480                                          * want to be a leader any more. Try
9481                                          * to continue as a non-leader.
9482                                          */
9483                                         break;
9484                                 }
9485                         } else { /* non-leader */
9486                                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
9487                                         /* Try to get a LEADER_LOCK HW lock,
9488                                          * since a former leader may have
9489                                          * been unloaded by the user or
9490                                          * released its leadership for some
9491                                          * other reason.
9492                                          */
9493                                         if (bnx2x_trylock_leader_lock(bp)) {
9494                                                 /* I'm a leader now! Restart a
9495                                                  * switch case.
9496                                                  */
9497                                                 bp->is_leader = 1;
9498                                                 break;
9499                                         }
9500
9501                                         schedule_delayed_work(&bp->sp_rtnl_task,
9502                                                                 HZ/10);
9503                                         return;
9504
9505                                 } else {
9506                                         /*
9507                                          * If there was a global attention, wait
9508                                          * for it to be cleared.
9509                                          */
9510                                         if (bnx2x_reset_is_global(bp)) {
9511                                                 schedule_delayed_work(
9512                                                         &bp->sp_rtnl_task,
9513                                                         HZ/10);
9514                                                 return;
9515                                         }
9516
9517                                         error_recovered =
9518                                           bp->eth_stats.recoverable_error;
9519                                         error_unrecovered =
9520                                           bp->eth_stats.unrecoverable_error;
9521                                         bp->recovery_state =
9522                                                 BNX2X_RECOVERY_NIC_LOADING;
9523                                         if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
9524                                                 error_unrecovered++;
9525                                                 netdev_err(bp->dev,
9526                                                            "Recovery failed. Power cycle needed\n");
9527                                                 /* Disconnect this device */
9528                                                 netif_device_detach(bp->dev);
9529                                                 /* Shut down the power */
9530                                                 bnx2x_set_power_state(
9531                                                         bp, PCI_D3hot);
9532                                                 smp_mb();
9533                                         } else {
9534                                                 bp->recovery_state =
9535                                                         BNX2X_RECOVERY_DONE;
9536                                                 error_recovered++;
9537                                                 smp_mb();
9538                                         }
9539                                         bp->eth_stats.recoverable_error =
9540                                                 error_recovered;
9541                                         bp->eth_stats.unrecoverable_error =
9542                                                 error_unrecovered;
9543
9544                                         return;
9545                                 }
9546                         }
9547                 default:
9548                         return;
9549                 }
9550         }
9551 }
9552
9553 static int bnx2x_close(struct net_device *dev);
9554
9555 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
9556  * scheduled on a general queue in order to prevent a deadlock.
9557  */
9558 static void bnx2x_sp_rtnl_task(struct work_struct *work)
9559 {
9560         struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
9561
9562         rtnl_lock();
9563
9564         if (!netif_running(bp->dev)) {
9565                 rtnl_unlock();
9566                 return;
9567         }
9568
9569         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9570 #ifdef BNX2X_STOP_ON_ERROR
9571                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9572                           "you will need to reboot when done\n");
9573                 goto sp_rtnl_not_reset;
9574 #endif
9575                 /*
9576                  * Clear all pending SP commands as we are going to reset the
9577                  * function anyway.
9578                  */
9579                 bp->sp_rtnl_state = 0;
9580                 smp_mb();
9581
9582                 bnx2x_parity_recover(bp);
9583
9584                 rtnl_unlock();
9585                 return;
9586         }
9587
9588         if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9589 #ifdef BNX2X_STOP_ON_ERROR
9590                 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
9591                           "you will need to reboot when done\n");
9592                 goto sp_rtnl_not_reset;
9593 #endif
9594
9595                 /*
9596                  * Clear all pending SP commands as we are going to reset the
9597                  * function anyway.
9598                  */
9599                 bp->sp_rtnl_state = 0;
9600                 smp_mb();
9601
9602                 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
9603                 bnx2x_nic_load(bp, LOAD_NORMAL);
9604
9605                 rtnl_unlock();
9606                 return;
9607         }
9608 #ifdef BNX2X_STOP_ON_ERROR
9609 sp_rtnl_not_reset:
9610 #endif
9611         if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9612                 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
9613         if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9614                 bnx2x_after_function_update(bp);
9615         /*
9616          * in case of fan failure we need to unload the driver even if the
9617          * "stop on error" debug flag is set, since we are trying to prevent
9618          * permanent overheating damage
9619          */
9620         if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
9621                 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
9622                 netif_device_detach(bp->dev);
9623                 bnx2x_close(bp->dev);
9624                 rtnl_unlock();
9625                 return;
9626         }
9627
9628         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
9629                 DP(BNX2X_MSG_SP,
9630                    "sending set mcast vf pf channel message from rtnl sp-task\n");
9631                 bnx2x_vfpf_set_mcast(bp->dev);
9632         }
9633         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
9634                                &bp->sp_rtnl_state)) {
9635                 if (!test_bit(__LINK_STATE_NOCARRIER, &bp->dev->state)) {
9636                         bnx2x_tx_disable(bp);
9637                         BNX2X_ERR("PF indicated channel is not serviceable anymore. This means this VF device is no longer operational\n");
9638                 }
9639         }
9640
9641         if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
9642                                &bp->sp_rtnl_state)) {
9643                 DP(BNX2X_MSG_SP,
9644                    "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
9645                 bnx2x_vfpf_storm_rx_mode(bp);
9646         }
9647
9648         if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9649                                &bp->sp_rtnl_state))
9650                 bnx2x_pf_set_vfs_vlan(bp);
9651
9652         /* work below must be done without the rtnl lock held (it takes the
9653          * lock itself and can be called from other contexts as well)
9654          */
9655         rtnl_unlock();
9656
9657         /* enable SR-IOV if applicable */
9658         if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
9659                                                &bp->sp_rtnl_state)) {
9660                 bnx2x_disable_sriov(bp);
9661                 bnx2x_enable_sriov(bp);
9662         }
9663 }
9664
9665 static void bnx2x_period_task(struct work_struct *work)
9666 {
9667         struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9668
9669         if (!netif_running(bp->dev))
9670                 goto period_task_exit;
9671
9672         if (CHIP_REV_IS_SLOW(bp)) {
9673                 BNX2X_ERR("period task called on emulation, ignoring\n");
9674                 goto period_task_exit;
9675         }
9676
9677         bnx2x_acquire_phy_lock(bp);
9678         /*
9679          * The barrier is needed to ensure the ordering between the writing to
9680          * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
9681          * the reading here.
9682          */
9683         smp_mb();
9684         if (bp->port.pmf) {
9685                 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9686
9687                 /* Re-queue task in 1 sec */
9688                 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9689         }
9690
9691         bnx2x_release_phy_lock(bp);
9692 period_task_exit:
9693         return;
9694 }
9695
9696 /*
9697  * Init service functions
9698  */
9699
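/* Return the GRC address of this function's PXP2 "pretend" register */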
9700 u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
9701 {
9702         u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9703         u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9704         return base + (BP_ABS_FUNC(bp)) * stride;
9705 }
9706
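/* Stop MAC Rx left enabled by a previous driver: BMAC/EMAC on E1x/E2 or
 * XMAC/UMAC on E3. The original register values are saved in 'vals' so the
 * caller can restore them once the BRB has been drained.
 */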
9707 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9708                                         struct bnx2x_mac_vals *vals)
9709 {
9710         u32 val, base_addr, offset, mask, reset_reg;
9711         bool mac_stopped = false;
9712         u8 port = BP_PORT(bp);
9713
9714         /* reset addresses as they also mark which values were changed */
9715         vals->bmac_addr = 0;
9716         vals->umac_addr = 0;
9717         vals->xmac_addr = 0;
9718         vals->emac_addr = 0;
9719
9720         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
9721
9722         if (!CHIP_IS_E3(bp)) {
9723                 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9724                 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9725                 if ((mask & reset_reg) && val) {
9726                         u32 wb_data[2];
9727                         BNX2X_DEV_INFO("Disable bmac Rx\n");
9728                         base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9729                                                 : NIG_REG_INGRESS_BMAC0_MEM;
9730                         offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9731                                                 : BIGMAC_REGISTER_BMAC_CONTROL;
9732
9733                         /*
9734                          * use rd/wr since we cannot use dmae. This is safe
9735                          * since MCP won't access the bus due to the request
9736                          * to unload, and no function on the path can be
9737                          * loaded at this time.
9738                          */
9739                         wb_data[0] = REG_RD(bp, base_addr + offset);
9740                         wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
9741                         vals->bmac_addr = base_addr + offset;
9742                         vals->bmac_val[0] = wb_data[0];
9743                         vals->bmac_val[1] = wb_data[1];
9744                         wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
9745                         REG_WR(bp, vals->bmac_addr, wb_data[0]);
9746                         REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
9747                 }
9748                 BNX2X_DEV_INFO("Disable emac Rx\n");
9749                 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
9750                 vals->emac_val = REG_RD(bp, vals->emac_addr);
9751                 REG_WR(bp, vals->emac_addr, 0);
9752                 mac_stopped = true;
9753         } else {
9754                 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9755                         BNX2X_DEV_INFO("Disable xmac Rx\n");
9756                         base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9757                         val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9758                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9759                                val & ~(1 << 1));
9760                         REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9761                                val | (1 << 1));
9762                         vals->xmac_addr = base_addr + XMAC_REG_CTRL;
9763                         vals->xmac_val = REG_RD(bp, vals->xmac_addr);
9764                         REG_WR(bp, vals->xmac_addr, 0);
9765                         mac_stopped = true;
9766                 }
9767                 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9768                 if (mask & reset_reg) {
9769                         BNX2X_DEV_INFO("Disable umac Rx\n");
9770                         base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
9771                         vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
9772                         vals->umac_val = REG_RD(bp, vals->umac_addr);
9773                         REG_WR(bp, vals->umac_addr, 0);
9774                         mac_stopped = true;
9775                 }
9776         }
9777
9778         if (mac_stopped)
9779                 msleep(20);
9780 }
9781
9782 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9783 #define BNX2X_PREV_UNDI_RCQ(val)        ((val) & 0xffff)
9784 #define BNX2X_PREV_UNDI_BD(val)         ((val) >> 16 & 0xffff)
9785 #define BNX2X_PREV_UNDI_PROD(rcq, bd)   ((bd) << 16 | (rcq))
9786
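/* Advance the UNDI RCQ and BD producers of the given port by 'inc' */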
9787 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
9788 {
9789         u16 rcq, bd;
9790         u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9791
9792         rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9793         bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9794
9795         tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9796         REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9797
9798         BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9799                        port, bd, rcq);
9800 }
9801
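/* Tell the MCP that the previous-unload flow is done, asking it to skip the
 * link reset.
 */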
9802 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
9803 {
9804         u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9805                                   DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
9806         if (!rc) {
9807                 BNX2X_ERR("MCP response failure, aborting\n");
9808                 return -EBUSY;
9809         }
9810
9811         return 0;
9812 }
9813
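/* Find the bnx2x_prev_list entry matching this device's bus, slot and path */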
9814 static struct bnx2x_prev_path_list *
9815                 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9816 {
9817         struct bnx2x_prev_path_list *tmp_list;
9818
9819         list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9820                 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9821                     bp->pdev->bus->number == tmp_list->bus &&
9822                     BP_PATH(bp) == tmp_list->path)
9823                         return tmp_list;
9824
9825         return NULL;
9826 }
9827
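/* Mark this path's previous-unload entry as having been hit by an EEH/AER
 * error.
 */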
9828 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
9829 {
9830         struct bnx2x_prev_path_list *tmp_list;
9831         int rc;
9832
9833         rc = down_interruptible(&bnx2x_prev_sem);
9834         if (rc) {
9835                 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
9836                 return rc;
9837         }
9838
9839         tmp_list = bnx2x_prev_path_get_entry(bp);
9840         if (tmp_list) {
9841                 tmp_list->aer = 1;
9842                 rc = 0;
9843         } else {
9844                 BNX2X_ERR("path %d: Entry does not exist for eeh; flow occurs before initial insmod is over?\n",
9845                           BP_PATH(bp));
9846         }
9847
9848         up(&bnx2x_prev_sem);
9849
9850         return rc;
9851 }
9852
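/* Return true if a previous unload already cleaned this path and the path is
 * not currently marked by AER.
 */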
9853 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9854 {
9855         struct bnx2x_prev_path_list *tmp_list;
9856         int rc = false;
9857
9858         if (down_trylock(&bnx2x_prev_sem))
9859                 return false;
9860
9861         tmp_list = bnx2x_prev_path_get_entry(bp);
9862         if (tmp_list) {
9863                 if (tmp_list->aer) {
9864                         DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
9865                            BP_PATH(bp));
9866                 } else {
9867                         rc = true;
9868                         BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9869                                        BP_PATH(bp));
9870                 }
9871         }
9872
9873         up(&bnx2x_prev_sem);
9874
9875         return rc;
9876 }
9877
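/* Return true if an UNDI (pre-boot) driver was detected on this port during
 * the previous-unload flow.
 */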
9878 bool bnx2x_port_after_undi(struct bnx2x *bp)
9879 {
9880         struct bnx2x_prev_path_list *entry;
9881         bool val;
9882
9883         down(&bnx2x_prev_sem);
9884
9885         entry = bnx2x_prev_path_get_entry(bp);
9886         val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
9887
9888         up(&bnx2x_prev_sem);
9889
9890         return val;
9891 }
9892
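/* Record this path in bnx2x_prev_list, or clear its AER mark if an entry
 * already exists; 'after_undi' remembers that an UNDI driver was found on the
 * port.
 */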
9893 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9894 {
9895         struct bnx2x_prev_path_list *tmp_list;
9896         int rc;
9897
9898         rc = down_interruptible(&bnx2x_prev_sem);
9899         if (rc) {
9900                 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
9901                 return rc;
9902         }
9903
9904         /* Check whether the entry for this path already exists */
9905         tmp_list = bnx2x_prev_path_get_entry(bp);
9906         if (tmp_list) {
9907                 if (!tmp_list->aer) {
9908                         BNX2X_ERR("Re-Marking the path.\n");
9909                 } else {
9910                         DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
9911                            BP_PATH(bp));
9912                         tmp_list->aer = 0;
9913                 }
9914                 up(&bnx2x_prev_sem);
9915                 return 0;
9916         }
9917         up(&bnx2x_prev_sem);
9918
9919         /* Create an entry for this path and add it */
9920         tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9921         if (!tmp_list) {
9922                 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9923                 return -ENOMEM;
9924         }
9925
9926         tmp_list->bus = bp->pdev->bus->number;
9927         tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9928         tmp_list->path = BP_PATH(bp);
9929         tmp_list->aer = 0;
9930         tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9931
9932         rc = down_interruptible(&bnx2x_prev_sem);
9933         if (rc) {
9934                 BNX2X_ERR("Received %d when trying to take the lock\n", rc);
9935                 kfree(tmp_list);
9936         } else {
9937                 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
9938                    BP_PATH(bp));
9939                 list_add(&tmp_list->list, &bnx2x_prev_list);
9940                 up(&bnx2x_prev_sem);
9941         }
9942
9943         return rc;
9944 }
9945
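/* Ask the MCP to initiate a Function Level Reset. Only supported on E2 and
 * newer chips with a bootcode of at least REQ_BC_VER_4_INITIATE_FLR.
 */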
9946 static int bnx2x_do_flr(struct bnx2x *bp)
9947 {
9948         int i;
9949         u16 status;
9950         struct pci_dev *dev = bp->pdev;
9951
9952         if (CHIP_IS_E1x(bp)) {
9953                 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9954                 return -EINVAL;
9955         }
9956
9957         /* only bootcode versions REQ_BC_VER_4_INITIATE_FLR and onwards support FLR */
9958         if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9959                 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9960                           bp->common.bc_ver);
9961                 return -EINVAL;
9962         }
9963
9964         /* Wait for the Transaction Pending bit to clear */
9965         for (i = 0; i < 4; i++) {
9966                 if (i)
9967                         msleep((1 << (i - 1)) * 100);
9968
9969                 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
9970                 if (!(status & PCI_EXP_DEVSTA_TRPND))
9971                         goto clear;
9972         }
9973
9974         dev_err(&dev->dev,
9975                 "transaction is not cleared; proceeding with reset anyway\n");
9976
9977 clear:
9978
9979         BNX2X_DEV_INFO("Initiating FLR\n");
9980         bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9981
9982         return 0;
9983 }
9984
9985 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
9986 {
9987         int rc;
9988
9989         BNX2X_DEV_INFO("Uncommon unload Flow\n");
9990
9991         /* Test if previous unload process was already finished for this path */
9992         if (bnx2x_prev_is_path_marked(bp))
9993                 return bnx2x_prev_mcp_done(bp);
9994
9995         BNX2X_DEV_INFO("Path is unmarked\n");
9996
9997         /* If the function has FLR capabilities and the existing FW version
9998          * matches the one required, then FLR will be sufficient to clean any
9999          * residue left by the previous driver
10000          */
10001         rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
10002
10003         if (!rc) {
10004                 /* fw version is good */
10005                 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10006                 rc = bnx2x_do_flr(bp);
10007         }
10008
10009         if (!rc) {
10010                 /* FLR was performed */
10011                 BNX2X_DEV_INFO("FLR successful\n");
10012                 return 0;
10013         }
10014
10015         BNX2X_DEV_INFO("Could not FLR\n");
10016
10017         /* Close the MCP request, return failure */
10018         rc = bnx2x_prev_mcp_done(bp);
10019         if (!rc)
10020                 rc = BNX2X_PREV_WAIT_NEEDED;
10021
10022         return rc;
10023 }
10024
10025 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10026 {
10027         u32 reset_reg, tmp_reg = 0, rc;
10028         bool prev_undi = false;
10029         struct bnx2x_mac_vals mac_vals;
10030
10031         /* It is possible a previous function received 'common' answer,
10032          * but hasn't loaded yet, therefore creating a scenario of
10033          * multiple functions receiving 'common' on the same path.
10034          */
10035         BNX2X_DEV_INFO("Common unload Flow\n");
10036
10037         memset(&mac_vals, 0, sizeof(mac_vals));
10038
10039         if (bnx2x_prev_is_path_marked(bp))
10040                 return bnx2x_prev_mcp_done(bp);
10041
10042         reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10043
10044         /* Reset should be performed after BRB is emptied */
10045         if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10046                 u32 timer_count = 1000;
10047
10048                 /* Close the MAC Rx to prevent BRB from filling up */
10049                 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10050
10051                 /* close LLH filters towards the BRB */
10052                 bnx2x_set_rx_filter(&bp->link_params, 0);
10053
10054                 /* Check if the UNDI driver was previously loaded
10055                  * UNDI driver initializes CID offset for normal bell to 0x7
10056                  */
10057                 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
10058                         tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
10059                         if (tmp_reg == 0x7) {
10060                                 BNX2X_DEV_INFO("UNDI previously loaded\n");
10061                                 prev_undi = true;
10062                                 /* clear the UNDI indication */
10063                                 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10064                                 /* clear possible idle check errors */
10065                                 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10066                         }
10067                 }
10068                 if (!CHIP_IS_E1x(bp))
10069                         /* block FW from writing to host */
10070                         REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10071
10072                 /* wait until BRB is empty */
10073                 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10074                 while (timer_count) {
10075                         u32 prev_brb = tmp_reg;
10076
10077                         tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10078                         if (!tmp_reg)
10079                                 break;
10080
10081                         BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10082
10083                         /* reset timer as long as BRB actually gets emptied */
10084                         if (prev_brb > tmp_reg)
10085                                 timer_count = 1000;
10086                         else
10087                                 timer_count--;
10088
10089                         /* If UNDI resides in memory, manually increment it */
10090                         if (prev_undi)
10091                                 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
10092
10093                         udelay(10);
10094                 }
10095
10096                 if (!timer_count)
10097                         BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10098         }
10099
10100         /* No packets are in the pipeline, path is ready for reset */
10101         bnx2x_reset_common(bp);
10102
10103         if (mac_vals.xmac_addr)
10104                 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10105         if (mac_vals.umac_addr)
10106                 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
10107         if (mac_vals.emac_addr)
10108                 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10109         if (mac_vals.bmac_addr) {
10110                 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10111                 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10112         }
10113
10114         rc = bnx2x_prev_mark_path(bp, prev_undi);
10115         if (rc) {
10116                 bnx2x_prev_mcp_done(bp);
10117                 return rc;
10118         }
10119
10120         return bnx2x_prev_mcp_done(bp);
10121 }
10122
10123 /* A previous driver DMAE transaction may have occurred when the pre-boot stage
10124  * ended and boot began, or when the kdump kernel was loaded. Either case would
10125  * invalidate the addresses of the transaction, resulting in the was-error bit
10126  * being set in the PCI, causing all hw-to-host PCIe transactions to time out.
10127  * If this happened we want to clear the interrupt which detected this from the
10128  * pglueb, as well as the was-done bit.
10129  */
10130 static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
10131 {
10132         if (!CHIP_IS_E1x(bp)) {
10133                 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10134                 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
10135                         DP(BNX2X_MSG_SP,
10136                            "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
10137                         REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10138                                1 << BP_FUNC(bp));
10139                 }
10140         }
10141 }
10142
10143 static int bnx2x_prev_unload(struct bnx2x *bp)
10144 {
10145         int time_counter = 10;
10146         u32 rc, fw, hw_lock_reg, hw_lock_val;
10147         BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10148
10149         /* clear hw from errors which may have resulted from an interrupted
10150          * dmae transaction.
10151          */
10152         bnx2x_prev_interrupted_dmae(bp);
10153
10154         /* Release previously held locks */
10155         hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10156                       (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10157                       (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10158
10159         hw_lock_val = REG_RD(bp, hw_lock_reg);
10160         if (hw_lock_val) {
10161                 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10162                         BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10163                         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10164                                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10165                 }
10166
10167                 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10168                 REG_WR(bp, hw_lock_reg, 0xffffffff);
10169         } else
10170                 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10171
10172         if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10173                 BNX2X_DEV_INFO("Release previously held alr\n");
10174                 bnx2x_release_alr(bp);
10175         }
10176
10177         do {
10178                 int aer = 0;
10179                 /* Lock MCP using an unload request */
10180                 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10181                 if (!fw) {
10182                         BNX2X_ERR("MCP response failure, aborting\n");
10183                         rc = -EBUSY;
10184                         break;
10185                 }
10186
10187                 rc = down_interruptible(&bnx2x_prev_sem);
10188                 if (rc) {
10189                         BNX2X_ERR("Cannot check for AER; Received %d when trying to take the lock\n",
10190                                   rc);
10191                 } else {
10192                         /* If Path is marked by EEH, ignore unload status */
10193                         aer = !!(bnx2x_prev_path_get_entry(bp) &&
10194                                  bnx2x_prev_path_get_entry(bp)->aer);
10195                         up(&bnx2x_prev_sem);
10196                 }
10197
10198                 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10199                         rc = bnx2x_prev_unload_common(bp);
10200                         break;
10201                 }
10202
10203                 /* non-common reply from MCP might require looping */
10204                 rc = bnx2x_prev_unload_uncommon(bp);
10205                 if (rc != BNX2X_PREV_WAIT_NEEDED)
10206                         break;
10207
10208                 msleep(20);
10209         } while (--time_counter);
10210
10211         if (!time_counter || rc) {
10212                 BNX2X_ERR("Failed unloading previous driver, aborting\n");
10213                 rc = -EBUSY;
10214         }
10215
10216         /* Mark function if its port was used to boot from SAN */
10217         if (bnx2x_port_after_undi(bp))
10218                 bp->link_params.feature_config_flags |=
10219                         FEATURE_CONFIG_BOOT_FROM_SAN;
10220
10221         BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10222
10223         return rc;
10224 }
10225
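/* Read hardware information common to all functions: chip id and revision,
 * port mode, flash size, shmem/shmem2 bases, bootcode version and the feature
 * flags it implies, boot mode and WoL capability.
 */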
10226 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10227 {
10228         u32 val, val2, val3, val4, id, boot_mode;
10229         u16 pmc;
10230
10231         /* Get the chip revision id and number. */
10232         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10233         val = REG_RD(bp, MISC_REG_CHIP_NUM);
10234         id = ((val & 0xffff) << 16);
10235         val = REG_RD(bp, MISC_REG_CHIP_REV);
10236         id |= ((val & 0xf) << 12);
10237
10238         /* Metal is read from PCI regs, but we can't access >=0x400 from
10239          * the configuration space (so we need to reg_rd)
10240          */
10241         val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10242         id |= (((val >> 24) & 0xf) << 4);
10243         val = REG_RD(bp, MISC_REG_BOND_ID);
10244         id |= (val & 0xf);
10245         bp->common.chip_id = id;
10246
10247         /* force 57811 according to MISC register */
10248         if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10249                 if (CHIP_IS_57810(bp))
10250                         bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10251                                 (bp->common.chip_id & 0x0000FFFF);
10252                 else if (CHIP_IS_57810_MF(bp))
10253                         bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10254                                 (bp->common.chip_id & 0x0000FFFF);
10255                 bp->common.chip_id |= 0x1;
10256         }
10257
10258         /* Set doorbell size */
10259         bp->db_size = (1 << BNX2X_DB_SHIFT);
10260
10261         if (!CHIP_IS_E1x(bp)) {
10262                 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10263                 if ((val & 1) == 0)
10264                         val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10265                 else
10266                         val = (val >> 1) & 1;
10267                 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10268                                                        "2_PORT_MODE");
10269                 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10270                                                  CHIP_2_PORT_MODE;
10271
10272                 if (CHIP_MODE_IS_4_PORT(bp))
10273                         bp->pfid = (bp->pf_num >> 1);   /* 0..3 */
10274                 else
10275                         bp->pfid = (bp->pf_num & 0x6);  /* 0, 2, 4, 6 */
10276         } else {
10277                 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10278                 bp->pfid = bp->pf_num;                  /* 0..7 */
10279         }
10280
10281         BNX2X_DEV_INFO("pf_id: %x\n", bp->pfid);
10282
10283         bp->link_params.chip_id = bp->common.chip_id;
10284         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10285
10286         val = (REG_RD(bp, 0x2874) & 0x55);
10287         if ((bp->common.chip_id & 0x1) ||
10288             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10289                 bp->flags |= ONE_PORT_FLAG;
10290                 BNX2X_DEV_INFO("single port device\n");
10291         }
10292
10293         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
10294         bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
10295                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
10296         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10297                        bp->common.flash_size, bp->common.flash_size);
10298
10299         bnx2x_init_shmem(bp);
10300
10301         bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10302                                         MISC_REG_GENERIC_CR_1 :
10303                                         MISC_REG_GENERIC_CR_0));
10304
10305         bp->link_params.shmem_base = bp->common.shmem_base;
10306         bp->link_params.shmem2_base = bp->common.shmem2_base;
10307         if (SHMEM2_RD(bp, size) >
10308             (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10309                 bp->link_params.lfa_base =
10310                 REG_RD(bp, bp->common.shmem2_base +
10311                        (u32)offsetof(struct shmem2_region,
10312                                      lfa_host_addr[BP_PORT(bp)]));
10313         else
10314                 bp->link_params.lfa_base = 0;
10315         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
10316                        bp->common.shmem_base, bp->common.shmem2_base);
10317
10318         if (!bp->common.shmem_base) {
10319                 BNX2X_DEV_INFO("MCP not active\n");
10320                 bp->flags |= NO_MCP_FLAG;
10321                 return;
10322         }
10323
10324         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
10325         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
10326
10327         bp->link_params.hw_led_mode = ((bp->common.hw_config &
10328                                         SHARED_HW_CFG_LED_MODE_MASK) >>
10329                                        SHARED_HW_CFG_LED_MODE_SHIFT);
10330
10331         bp->link_params.feature_config_flags = 0;
10332         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10333         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10334                 bp->link_params.feature_config_flags |=
10335                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10336         else
10337                 bp->link_params.feature_config_flags &=
10338                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10339
10340         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10341         bp->common.bc_ver = val;
10342         BNX2X_DEV_INFO("bc_ver %X\n", val);
10343         if (val < BNX2X_BC_VER) {
10344                 /* for now only warn
10345                  * later we might need to enforce this */
10346                 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10347                           BNX2X_BC_VER, val);
10348         }
10349         bp->link_params.feature_config_flags |=
10350                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
10351                                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10352
10353         bp->link_params.feature_config_flags |=
10354                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10355                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
10356         bp->link_params.feature_config_flags |=
10357                 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10358                 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
10359         bp->link_params.feature_config_flags |=
10360                 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10361                 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
10362
10363         bp->link_params.feature_config_flags |=
10364                 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10365                 FEATURE_CONFIG_MT_SUPPORT : 0;
10366
10367         bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10368                         BC_SUPPORTS_PFC_STATS : 0;
10369
10370         bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10371                         BC_SUPPORTS_FCOE_FEATURES : 0;
10372
10373         bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10374                         BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
10375         boot_mode = SHMEM_RD(bp,
10376                         dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10377                         PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10378         switch (boot_mode) {
10379         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10380                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10381                 break;
10382         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10383                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10384                 break;
10385         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10386                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10387                 break;
10388         case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10389                 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10390                 break;
10391         }
10392
10393         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
10394         bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10395
10396         BNX2X_DEV_INFO("%sWoL capable\n",
10397                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
10398
10399         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10400         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10401         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10402         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10403
10404         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10405                  val, val2, val3, val4);
10406 }
10407
10408 #define IGU_FID(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10409 #define IGU_VEC(val)    GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10410
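/* Determine this PF's IGU status block layout: either the fixed
 * backward-compatible mapping, or a scan of the IGU CAM to find the default
 * status block id, the first non-default status block and the SB count.
 */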
10411 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
10412 {
10413         int pfid = BP_FUNC(bp);
10414         int igu_sb_id;
10415         u32 val;
10416         u8 fid, igu_sb_cnt = 0;
10417
10418         bp->igu_base_sb = 0xff;
10419         if (CHIP_INT_MODE_IS_BC(bp)) {
10420                 int vn = BP_VN(bp);
10421                 igu_sb_cnt = bp->igu_sb_cnt;
10422                 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10423                         FP_SB_MAX_E1x;
10424
10425                 bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
10426                         (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10427
10428                 return 0;
10429         }
10430
10431         /* IGU in normal mode - read CAM */
10432         for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10433              igu_sb_id++) {
10434                 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10435                 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10436                         continue;
10437                 fid = IGU_FID(val);
10438                 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10439                         if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10440                                 continue;
10441                         if (IGU_VEC(val) == 0)
10442                                 /* default status block */
10443                                 bp->igu_dsb_id = igu_sb_id;
10444                         else {
10445                                 if (bp->igu_base_sb == 0xff)
10446                                         bp->igu_base_sb = igu_sb_id;
10447                                 igu_sb_cnt++;
10448                         }
10449                 }
10450         }
10451
10452 #ifdef CONFIG_PCI_MSI
10453         /* Due to new PF resource allocation by MFW T7.4 and above, the
10454          * number of CAM entries may differ from the value advertised
10455          * in PCI.
10456          * The driver should use the minimum of the two as the actual
10457          * status block count
10458          */
10459         bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
10460 #endif
10461
10462         if (igu_sb_cnt == 0) {
10463                 BNX2X_ERR("CAM configuration error\n");
10464                 return -EINVAL;
10465         }
10466
10467         return 0;
10468 }
10469
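/* Aggregate the capabilities of the configured PHYs into bp->port.supported[],
 * read the port's PHY address and trim the supported modes according to the
 * NVRAM speed_cap_mask of each configuration.
 */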
10470 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
10471 {
10472         int cfg_size = 0, idx, port = BP_PORT(bp);
10473
10474         /* Aggregation of supported attributes of all external phys */
10475         bp->port.supported[0] = 0;
10476         bp->port.supported[1] = 0;
10477         switch (bp->link_params.num_phys) {
10478         case 1:
10479                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10480                 cfg_size = 1;
10481                 break;
10482         case 2:
10483                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10484                 cfg_size = 1;
10485                 break;
10486         case 3:
10487                 if (bp->link_params.multi_phy_config &
10488                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10489                         bp->port.supported[1] =
10490                                 bp->link_params.phy[EXT_PHY1].supported;
10491                         bp->port.supported[0] =
10492                                 bp->link_params.phy[EXT_PHY2].supported;
10493                 } else {
10494                         bp->port.supported[0] =
10495                                 bp->link_params.phy[EXT_PHY1].supported;
10496                         bp->port.supported[1] =
10497                                 bp->link_params.phy[EXT_PHY2].supported;
10498                 }
10499                 cfg_size = 2;
10500                 break;
10501         }
10502
10503         if (!(bp->port.supported[0] || bp->port.supported[1])) {
10504                 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
10505                            SHMEM_RD(bp,
10506                            dev_info.port_hw_config[port].external_phy_config),
10507                            SHMEM_RD(bp,
10508                            dev_info.port_hw_config[port].external_phy_config2));
10509                 return;
10510         }
10511
10512         if (CHIP_IS_E3(bp))
10513                 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10514         else {
10515                 switch (switch_cfg) {
10516                 case SWITCH_CFG_1G:
10517                         bp->port.phy_addr = REG_RD(
10518                                 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10519                         break;
10520                 case SWITCH_CFG_10G:
10521                         bp->port.phy_addr = REG_RD(
10522                                 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10523                         break;
10524                 default:
10525                         BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10526                                   bp->port.link_config[0]);
10527                         return;
10528                 }
10529         }
10530         BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
10531         /* mask what we support according to speed_cap_mask per configuration */
10532         for (idx = 0; idx < cfg_size; idx++) {
10533                 if (!(bp->link_params.speed_cap_mask[idx] &
10534                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
10535                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
10536
10537                 if (!(bp->link_params.speed_cap_mask[idx] &
10538                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
10539                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
10540
10541                 if (!(bp->link_params.speed_cap_mask[idx] &
10542                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
10543                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
10544
10545                 if (!(bp->link_params.speed_cap_mask[idx] &
10546                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
10547                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
10548
10549                 if (!(bp->link_params.speed_cap_mask[idx] &
10550                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
10551                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
10552                                                      SUPPORTED_1000baseT_Full);
10553
10554                 if (!(bp->link_params.speed_cap_mask[idx] &
10555                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
10556                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
10557
10558                 if (!(bp->link_params.speed_cap_mask[idx] &
10559                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
10560                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10561         }
10562
10563         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10564                        bp->port.supported[1]);
10565 }
10566
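/* Translate the NVRAM link_config of each configuration into the requested
 * line speed/duplex and the advertised link modes.
 */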
10567 static void bnx2x_link_settings_requested(struct bnx2x *bp)
10568 {
10569         u32 link_config, idx, cfg_size = 0;
10570         bp->port.advertising[0] = 0;
10571         bp->port.advertising[1] = 0;
10572         switch (bp->link_params.num_phys) {
10573         case 1:
10574         case 2:
10575                 cfg_size = 1;
10576                 break;
10577         case 3:
10578                 cfg_size = 2;
10579                 break;
10580         }
10581         for (idx = 0; idx < cfg_size; idx++) {
10582                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10583                 link_config = bp->port.link_config[idx];
10584                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
10585                 case PORT_FEATURE_LINK_SPEED_AUTO:
10586                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10587                                 bp->link_params.req_line_speed[idx] =
10588                                         SPEED_AUTO_NEG;
10589                                 bp->port.advertising[idx] |=
10590                                         bp->port.supported[idx];
10591                                 if (bp->link_params.phy[EXT_PHY1].type ==
10592                                     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10593                                         bp->port.advertising[idx] |=
10594                                         (SUPPORTED_100baseT_Half |
10595                                          SUPPORTED_100baseT_Full);
10596                         } else {
10597                                 /* force 10G, no AN */
10598                                 bp->link_params.req_line_speed[idx] =
10599                                         SPEED_10000;
10600                                 bp->port.advertising[idx] |=
10601                                         (ADVERTISED_10000baseT_Full |
10602                                          ADVERTISED_FIBRE);
10603                                 continue;
10604                         }
10605                         break;
10606
10607                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
10608                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10609                                 bp->link_params.req_line_speed[idx] =
10610                                         SPEED_10;
10611                                 bp->port.advertising[idx] |=
10612                                         (ADVERTISED_10baseT_Full |
10613                                          ADVERTISED_TP);
10614                         } else {
10615                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10616                                             link_config,
10617                                     bp->link_params.speed_cap_mask[idx]);
10618                                 return;
10619                         }
10620                         break;
10621
10622                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
10623                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10624                                 bp->link_params.req_line_speed[idx] =
10625                                         SPEED_10;
10626                                 bp->link_params.req_duplex[idx] =
10627                                         DUPLEX_HALF;
10628                                 bp->port.advertising[idx] |=
10629                                         (ADVERTISED_10baseT_Half |
10630                                          ADVERTISED_TP);
10631                         } else {
10632                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10633                                           link_config,
10634                                           bp->link_params.speed_cap_mask[idx]);
10635                                 return;
10636                         }
10637                         break;
10638
10639                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10640                         if (bp->port.supported[idx] &
10641                             SUPPORTED_100baseT_Full) {
10642                                 bp->link_params.req_line_speed[idx] =
10643                                         SPEED_100;
10644                                 bp->port.advertising[idx] |=
10645                                         (ADVERTISED_100baseT_Full |
10646                                          ADVERTISED_TP);
10647                         } else {
10648                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10649                                           link_config,
10650                                           bp->link_params.speed_cap_mask[idx]);
10651                                 return;
10652                         }
10653                         break;
10654
10655                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10656                         if (bp->port.supported[idx] &
10657                             SUPPORTED_100baseT_Half) {
10658                                 bp->link_params.req_line_speed[idx] =
10659                                         SPEED_100;
10660                                 bp->link_params.req_duplex[idx] =
10661                                         DUPLEX_HALF;
10662                                 bp->port.advertising[idx] |=
10663                                         (ADVERTISED_100baseT_Half |
10664                                          ADVERTISED_TP);
10665                         } else {
10666                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10667                                     link_config,
10668                                     bp->link_params.speed_cap_mask[idx]);
10669                                 return;
10670                         }
10671                         break;
10672
10673                 case PORT_FEATURE_LINK_SPEED_1G:
10674                         if (bp->port.supported[idx] &
10675                             SUPPORTED_1000baseT_Full) {
10676                                 bp->link_params.req_line_speed[idx] =
10677                                         SPEED_1000;
10678                                 bp->port.advertising[idx] |=
10679                                         (ADVERTISED_1000baseT_Full |
10680                                          ADVERTISED_TP);
10681                         } else {
10682                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10683                                     link_config,
10684                                     bp->link_params.speed_cap_mask[idx]);
10685                                 return;
10686                         }
10687                         break;
10688
10689                 case PORT_FEATURE_LINK_SPEED_2_5G:
10690                         if (bp->port.supported[idx] &
10691                             SUPPORTED_2500baseX_Full) {
10692                                 bp->link_params.req_line_speed[idx] =
10693                                         SPEED_2500;
10694                                 bp->port.advertising[idx] |=
10695                                         (ADVERTISED_2500baseX_Full |
10696                                          ADVERTISED_TP);
10697                         } else {
10698                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10699                                     link_config,
10700                                     bp->link_params.speed_cap_mask[idx]);
10701                                 return;
10702                         }
10703                         break;
10704
10705                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
10706                         if (bp->port.supported[idx] &
10707                             SUPPORTED_10000baseT_Full) {
10708                                 bp->link_params.req_line_speed[idx] =
10709                                         SPEED_10000;
10710                                 bp->port.advertising[idx] |=
10711                                         (ADVERTISED_10000baseT_Full |
10712                                          ADVERTISED_FIBRE);
10713                         } else {
10714                                 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x  speed_cap_mask 0x%x\n",
10715                                     link_config,
10716                                     bp->link_params.speed_cap_mask[idx]);
10717                                 return;
10718                         }
10719                         break;
10720                 case PORT_FEATURE_LINK_SPEED_20G:
10721                         bp->link_params.req_line_speed[idx] = SPEED_20000;
10722                         break;
10723
10724                 default:
10725                         BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
10726                                   link_config);
10727                         bp->link_params.req_line_speed[idx] =
10728                                 SPEED_AUTO_NEG;
10729                         bp->port.advertising[idx] =
10730                                 bp->port.supported[idx];
10731                         break;
10732                 }
10733
10734                 bp->link_params.req_flow_ctrl[idx] = (link_config &
10735                                          PORT_FEATURE_FLOW_CONTROL_MASK);
10736                 if (bp->link_params.req_flow_ctrl[idx] ==
10737                     BNX2X_FLOW_CTRL_AUTO) {
10738                         if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10739                                 bp->link_params.req_flow_ctrl[idx] =
10740                                                         BNX2X_FLOW_CTRL_NONE;
10741                         else
10742                                 bnx2x_set_requested_fc(bp);
10743                 }
10744
10745                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
10746                                bp->link_params.req_line_speed[idx],
10747                                bp->link_params.req_duplex[idx],
10748                                bp->link_params.req_flow_ctrl[idx],
10749                                bp->port.advertising[idx]);
10750         }
10751 }
10752
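/**
 * bnx2x_set_mac_buf - assemble a MAC address from two shmem words.
 *
 * @mac_buf: destination buffer, ETH_ALEN bytes
 * @mac_lo: lower four bytes of the address
 * @mac_hi: upper two bytes of the address
 *
 * Converts both words to big-endian and stores the upper bytes first,
 * e.g. mac_hi 0x0010 and mac_lo 0x18a1b2c3 yield 00:10:18:a1:b2:c3.
 */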
10753 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
10754 {
10755         __be16 mac_hi_be = cpu_to_be16(mac_hi);
10756         __be32 mac_lo_be = cpu_to_be32(mac_lo);
10757         memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
10758         memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
10759 }
10760
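/**
 * bnx2x_get_port_hwinfo - read the per-port HW configuration from shmem.
 *
 * @bp: driver handle
 *
 * Fills bp->link_params and bp->port from the port_hw_config and
 * port_feature_config shmem structures, probes the PHYs, derives the
 * supported and requested link settings and sets the WoL and EEE
 * defaults.
 */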
10761 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
10762 {
10763         int port = BP_PORT(bp);
10764         u32 config;
10765         u32 ext_phy_type, ext_phy_config, eee_mode;
10766
10767         bp->link_params.bp = bp;
10768         bp->link_params.port = port;
10769
10770         bp->link_params.lane_config =
10771                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
10772
10773         bp->link_params.speed_cap_mask[0] =
10774                 SHMEM_RD(bp,
10775                          dev_info.port_hw_config[port].speed_capability_mask) &
10776                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10777         bp->link_params.speed_cap_mask[1] =
10778                 SHMEM_RD(bp,
10779                          dev_info.port_hw_config[port].speed_capability_mask2) &
10780                 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
10781         bp->port.link_config[0] =
10782                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10783
10784         bp->port.link_config[1] =
10785                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
10786
10787         bp->link_params.multi_phy_config =
10788                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
10789         /* If the device is capable of WoL, set the default state according
10790          * to the HW
10791          */
10792         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
10793         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10794                    (config & PORT_FEATURE_WOL_ENABLED));
10795
10796         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10797             PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
10798                 bp->flags |= NO_ISCSI_FLAG;
10799         if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10800             PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
10801                 bp->flags |= NO_FCOE_FLAG;
10802
10803         BNX2X_DEV_INFO("lane_config 0x%08x  speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
10804                        bp->link_params.lane_config,
10805                        bp->link_params.speed_cap_mask[0],
10806                        bp->port.link_config[0]);
10807
10808         bp->link_params.switch_cfg = (bp->port.link_config[0] &
10809                                       PORT_FEATURE_CONNECTED_SWITCH_MASK);
10810         bnx2x_phy_probe(&bp->link_params);
10811         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
10812
10813         bnx2x_link_settings_requested(bp);
10814
10815         /*
10816          * If connected directly, work with the internal PHY; otherwise, work
10817          * with the external PHY
10818          */
10819         ext_phy_config =
10820                 SHMEM_RD(bp,
10821                          dev_info.port_hw_config[port].external_phy_config);
10822         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
10823         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
10824                 bp->mdio.prtad = bp->port.phy_addr;
10825
10826         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10827                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10828                 bp->mdio.prtad =
10829                         XGXS_EXT_PHY_ADDR(ext_phy_config);
10830
10831         /* Configure the EEE link feature according to the nvram value */
10832         eee_mode = (((SHMEM_RD(bp, dev_info.
10833                       port_feature_config[port].eee_power_mode)) &
10834                      PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10835                     PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10836         if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10837                 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10838                                            EEE_MODE_ENABLE_LPI |
10839                                            EEE_MODE_OUTPUT_TIME;
10840         } else {
10841                 bp->link_params.eee_mode = 0;
10842         }
10843 }
10844
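/**
 * bnx2x_get_iscsi_info - read the iSCSI license limit from shmem.
 *
 * @bp: driver handle
 *
 * Sets the maximum number of allowed iSCSI connections; iSCSI is
 * disabled when CNIC is not supported or when the limit is zero.
 */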
10845 void bnx2x_get_iscsi_info(struct bnx2x *bp)
10846 {
10847         u32 no_flags = NO_ISCSI_FLAG;
10848         int port = BP_PORT(bp);
10849         u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10850                                 drv_lic_key[port].max_iscsi_conn);
10851
10852         if (!CNIC_SUPPORT(bp)) {
10853                 bp->flags |= no_flags;
10854                 return;
10855         }
10856
10857         /* Get the number of maximum allowed iSCSI connections */
10858         bp->cnic_eth_dev.max_iscsi_conn =
10859                 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10860                 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10861
10862         BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10863                        bp->cnic_eth_dev.max_iscsi_conn);
10864
10865         /*
10866          * If the maximum allowed number of connections is zero,
10867          * disable the feature.
10868          */
10869         if (!bp->cnic_eth_dev.max_iscsi_conn)
10870                 bp->flags |= no_flags;
10871 }
10872
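/**
 * bnx2x_get_ext_wwn_info - read the FCoE WWNs from the MF extended config.
 *
 * @bp: driver handle
 * @func: absolute function id whose configuration is read
 */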
10873 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
10874 {
10875         /* Port info */
10876         bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10877                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10878         bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10879                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10880
10881         /* Node info */
10882         bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10883                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10884         bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10885                 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10886 }
10887
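/**
 * bnx2x_shared_fcoe_funcs - count the functions sharing the FCoE resources.
 *
 * @bp: driver handle
 *
 * In MF mode this counts the non-hidden FCoE-enabled functions on this
 * path (the loop visits the even absolute function ids on path 0, the
 * odd ones on path 1); in SF mode it counts the ports with a non-zero
 * FCoE license.
 */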
10888 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
10889 {
10890         u8 count = 0;
10891
10892         if (IS_MF(bp)) {
10893                 u8 fid;
10894
10895                 /* iterate over absolute function ids for this path: */
10896                 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
10897                         if (IS_MF_SD(bp)) {
10898                                 u32 cfg = MF_CFG_RD(bp,
10899                                                     func_mf_config[fid].config);
10900
10901                                 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
10902                                     ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
10903                                             FUNC_MF_CFG_PROTOCOL_FCOE))
10904                                         count++;
10905                         } else {
10906                                 u32 cfg = MF_CFG_RD(bp,
10907                                                     func_ext_config[fid].
10908                                                                       func_cfg);
10909
10910                                 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
10911                                     (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
10912                                         count++;
10913                         }
10914                 }
10915         } else { /* SF */
10916                 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
10917
10918                 for (port = 0; port < port_cnt; port++) {
10919                         u32 lic = SHMEM_RD(bp,
10920                                            drv_lic_key[port].max_fcoe_conn) ^
10921                                   FW_ENCODE_32BIT_PATTERN;
10922                         if (lic)
10923                                 count++;
10924                 }
10925         }
10926
10927         return count;
10928 }
10929
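/**
 * bnx2x_get_fcoe_info - read the FCoE license limits and WWNs.
 *
 * @bp: driver handle
 *
 * Sets the maximum number of allowed FCoE connections, splits the
 * per-engine FCoE tasks between the functions that share them and
 * reads the WWNs; FCoE is disabled when the connection limit is zero.
 */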
10930 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
10931 {
10932         int port = BP_PORT(bp);
10933         int func = BP_ABS_FUNC(bp);
10934         u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10935                                 drv_lic_key[port].max_fcoe_conn);
10936         u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
10937
10938         if (!CNIC_SUPPORT(bp)) {
10939                 bp->flags |= NO_FCOE_FLAG;
10940                 return;
10941         }
10942
10943         /* Get the number of maximum allowed FCoE connections */
10944         bp->cnic_eth_dev.max_fcoe_conn =
10945                 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10946                 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10947
10948         /* Calculate the number of maximum allowed FCoE tasks */
10949         bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
10950
10951         /* check if FCoE resources must be shared between different functions */
10952         if (num_fcoe_func)
10953                 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
10954
10955         /* Read the WWN: */
10956         if (!IS_MF(bp)) {
10957                 /* Port info */
10958                 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10959                         SHMEM_RD(bp,
10960                                  dev_info.port_hw_config[port].
10961                                  fcoe_wwn_port_name_upper);
10962                 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10963                         SHMEM_RD(bp,
10964                                  dev_info.port_hw_config[port].
10965                                  fcoe_wwn_port_name_lower);
10966
10967                 /* Node info */
10968                 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10969                         SHMEM_RD(bp,
10970                                  dev_info.port_hw_config[port].
10971                                  fcoe_wwn_node_name_upper);
10972                 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10973                         SHMEM_RD(bp,
10974                                  dev_info.port_hw_config[port].
10975                                  fcoe_wwn_node_name_lower);
10976         } else if (!IS_MF_SD(bp)) {
10977                 /*
10978                  * Read the WWN info only if the FCoE feature is enabled for
10979                  * this function.
10980                  */
10981                 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
10982                         bnx2x_get_ext_wwn_info(bp, func);
10983
10984         } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
10985                 bnx2x_get_ext_wwn_info(bp, func);
10986         }
10987
10988         BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
10989
10990         /*
10991          * If the maximum allowed number of connections is zero,
10992          * disable the feature.
10993          */
10994         if (!bp->cnic_eth_dev.max_fcoe_conn)
10995                 bp->flags |= NO_FCOE_FLAG;
10996 }
10997
10998 static void bnx2x_get_cnic_info(struct bnx2x *bp)
10999 {
11000         /*
11001          * iSCSI may be dynamically disabled, but by reading the
11002          * info here the driver can decrease its memory usage if
11003          * the feature is disabled for good.
11004          */
11005         bnx2x_get_iscsi_info(bp);
11006         bnx2x_get_fcoe_info(bp);
11007 }
11008
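/**
 * bnx2x_get_cnic_mac_hwinfo - read the iSCSI and FCoE (FIP) MAC addresses.
 *
 * @bp: driver handle
 *
 * In MF mode the MACs come from the function configuration, in SF mode
 * from the port configuration; an invalid MAC disables the
 * corresponding storage feature.
 */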
11009 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11010 {
11011         u32 val, val2;
11012         int func = BP_ABS_FUNC(bp);
11013         int port = BP_PORT(bp);
11014         u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11015         u8 *fip_mac = bp->fip_mac;
11016
11017         if (IS_MF(bp)) {
11018                 /* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI
11019                  * nor an FCoE MAC then the appropriate feature should be
11020                  * disabled. In non-SD mode the feature configuration comes
11021                  * from struct func_ext_config.
11022                  */
11023                 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
11024                         u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11025                         if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11026                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11027                                                  iscsi_mac_addr_upper);
11028                                 val = MF_CFG_RD(bp, func_ext_config[func].
11029                                                 iscsi_mac_addr_lower);
11030                                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11031                                 BNX2X_DEV_INFO
11032                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11033                         } else {
11034                                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11035                         }
11036
11037                         if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11038                                 val2 = MF_CFG_RD(bp, func_ext_config[func].
11039                                                  fcoe_mac_addr_upper);
11040                                 val = MF_CFG_RD(bp, func_ext_config[func].
11041                                                 fcoe_mac_addr_lower);
11042                                 bnx2x_set_mac_buf(fip_mac, val, val2);
11043                                 BNX2X_DEV_INFO
11044                                         ("Read FCoE L2 MAC: %pM\n", fip_mac);
11045                         } else {
11046                                 bp->flags |= NO_FCOE_FLAG;
11047                         }
11048
11049                         bp->mf_ext_config = cfg;
11050
11051                 } else { /* SD MODE */
11052                         if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11053                                 /* use primary mac as iscsi mac */
11054                                 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11055
11056                                 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11057                                 BNX2X_DEV_INFO
11058                                         ("Read iSCSI MAC: %pM\n", iscsi_mac);
11059                         } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11060                                 /* use primary mac as fip mac */
11061                                 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11062                                 BNX2X_DEV_INFO("SD FCoE MODE\n");
11063                                 BNX2X_DEV_INFO
11064                                         ("Read FIP MAC: %pM\n", fip_mac);
11065                         }
11066                 }
11067
11068                 /* If this is a storage-only interface, use SAN mac as
11069                  * primary MAC. Notice that for SD this is already the case,
11070                  * as the SAN mac was copied from the primary MAC.
11071                  */
11072                 if (IS_MF_FCOE_AFEX(bp))
11073                         memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
11074         } else {
11075                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11076                                 iscsi_mac_upper);
11077                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11078                                iscsi_mac_lower);
11079                 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11080
11081                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11082                                 fcoe_fip_mac_upper);
11083                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11084                                fcoe_fip_mac_lower);
11085                 bnx2x_set_mac_buf(fip_mac, val, val2);
11086         }
11087
11088         /* Disable iSCSI OOO if MAC configuration is invalid. */
11089         if (!is_valid_ether_addr(iscsi_mac)) {
11090                 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11091                 memset(iscsi_mac, 0, ETH_ALEN);
11092         }
11093
11094         /* Disable FCoE if MAC configuration is invalid. */
11095         if (!is_valid_ether_addr(fip_mac)) {
11096                 bp->flags |= NO_FCOE_FLAG;
11097                 memset(bp->fip_mac, 0, ETH_ALEN);
11098         }
11099 }
11100
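/**
 * bnx2x_get_mac_hwinfo - read the primary MAC address.
 *
 * @bp: driver handle
 *
 * Reads the primary MAC from the MF or port configuration (a random
 * MAC is generated when the MCP is absent) and, when CNIC is
 * supported, the storage MACs as well.
 */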
11101 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11102 {
11103         u32 val, val2;
11104         int func = BP_ABS_FUNC(bp);
11105         int port = BP_PORT(bp);
11106
11107         /* Zero primary MAC configuration */
11108         memset(bp->dev->dev_addr, 0, ETH_ALEN);
11109
11110         if (BP_NOMCP(bp)) {
11111                 BNX2X_ERROR("warning: random MAC workaround active\n");
11112                 eth_hw_addr_random(bp->dev);
11113         } else if (IS_MF(bp)) {
11114                 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11115                 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11116                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11117                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11118                         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11119
11120                 if (CNIC_SUPPORT(bp))
11121                         bnx2x_get_cnic_mac_hwinfo(bp);
11122         } else {
11123                 /* in SF read MACs from port configuration */
11124                 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11125                 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11126                 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11127
11128                 if (CNIC_SUPPORT(bp))
11129                         bnx2x_get_cnic_mac_hwinfo(bp);
11130         }
11131
11132         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11133
11134         if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
11135                 dev_err(&bp->pdev->dev,
11136                         "bad Ethernet MAC address configuration: %pM\n"
11137                         "change it manually before bringing up the appropriate network interface\n",
11138                         bp->dev->dev_addr);
11139 }
11140
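/**
 * bnx2x_get_dropless_info - check if dropless flow control is configured.
 *
 * @bp: driver handle
 *
 * Returns true if pause-on-host-ring is enabled in the NVRAM for this
 * function (MF on a non-E1x chip) or for this port (otherwise).
 */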
11141 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11142 {
11143         int tmp;
11144         u32 cfg;
11145
11146         if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11147                 /* Take function: tmp = func */
11148                 tmp = BP_ABS_FUNC(bp);
11149                 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11150                 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11151         } else {
11152                 /* Take port: tmp = port */
11153                 tmp = BP_PORT(bp);
11154                 cfg = SHMEM_RD(bp,
11155                                dev_info.port_hw_config[tmp].generic_features);
11156                 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11157         }
11158         return cfg;
11159 }
11160
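/**
 * bnx2x_get_hwinfo - read the complete HW configuration.
 *
 * @bp: driver handle
 *
 * Initializes the interrupt block (HC or IGU) parameters and the MF
 * mode, then reads the port, MAC and CNIC information.
 */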
11161 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11162 {
11163         int func = BP_ABS_FUNC(bp); /* absolute function id */
11164         int vn;
11165         u32 val = 0;
11166         int rc = 0;
11167
11168         bnx2x_get_common_hwinfo(bp);
11169
11170         /*
11171          * initialize IGU parameters
11172          */
11173         if (CHIP_IS_E1x(bp)) {
11174                 bp->common.int_block = INT_BLOCK_HC;
11175
11176                 bp->igu_dsb_id = DEF_SB_IGU_ID;
11177                 bp->igu_base_sb = 0;
11178         } else {
11179                 bp->common.int_block = INT_BLOCK_IGU;
11180
11181                 /* do not allow device reset during IGU info processing */
11182                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11183
11184                 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11185
11186                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11187                         int tout = 5000;
11188
11189                         BNX2X_DEV_INFO("FORCING Normal Mode\n");
11190
11191                         val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11192                         REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11193                         REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11194
11195                         while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11196                                 tout--;
11197                                 usleep_range(1000, 2000);
11198                         }
11199
11200                         if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11201                                 dev_err(&bp->pdev->dev,
11202                                         "FORCING Normal Mode failed!!!\n");
11203                                 bnx2x_release_hw_lock(bp,
11204                                                       HW_LOCK_RESOURCE_RESET);
11205                                 return -EPERM;
11206                         }
11207                 }
11208
11209                 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11210                         BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11211                         bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11212                 } else
11213                         BNX2X_DEV_INFO("IGU Normal Mode\n");
11214
11215                 rc = bnx2x_get_igu_cam_info(bp);
11216                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11217                 if (rc)
11218                         return rc;
11219         }
11220
11221         /*
11222          * set the base FW non-default (fast path) status block id; this value is
11223          * used to initialize the fw_sb_id saved on the fp/queue structure to
11224          * determine the id used by the FW.
11225          */
11226         if (CHIP_IS_E1x(bp))
11227                 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11228         else /*
11229               * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
11230               * the same queue are indicated on the same IGU SB). So we prefer
11231               * FW and IGU SBs to be the same value.
11232               */
11233                 bp->base_fw_ndsb = bp->igu_base_sb;
11234
11235         BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
11236                        "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11237                        bp->igu_sb_cnt, bp->base_fw_ndsb);
11238
11239         /*
11240          * Initialize MF configuration
11241          */
11242
11243         bp->mf_ov = 0;
11244         bp->mf_mode = 0;
11245         vn = BP_VN(bp);
11246
11247         if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
11248                 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11249                                bp->common.shmem2_base, SHMEM2_RD(bp, size),
11250                               (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11251
11252                 if (SHMEM2_HAS(bp, mf_cfg_addr))
11253                         bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11254                 else
11255                         bp->common.mf_cfg_base = bp->common.shmem_base +
11256                                 offsetof(struct shmem_region, func_mb) +
11257                                 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
11258                 /*
11259                  * get mf configuration:
11260                  * 1. Existence of MF configuration
11261          *    for Switch-Independent mode;
11262                  *    for  Switch-Independent mode;
11263                  *    OVLAN must be legal for Switch-Dependent mode
11264                  * 3. SF_MODE configures specific MF mode
11265                  */
11266                 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11267                         /* get mf configuration */
11268                         val = SHMEM_RD(bp,
11269                                        dev_info.shared_feature_config.config);
11270                         val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11271
11272                         switch (val) {
11273                         case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11274                                 val = MF_CFG_RD(bp, func_mf_config[func].
11275                                                 mac_upper);
11276                                 /* check for legal mac (upper bytes) */
11277                                 if (val != 0xffff) {
11278                                         bp->mf_mode = MULTI_FUNCTION_SI;
11279                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11280                                                    func_mf_config[func].config);
11281                                 } else
11282                                         BNX2X_DEV_INFO("illegal MAC address for SI\n");
11283                                 break;
11284                         case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11285                                 if ((!CHIP_IS_E1x(bp)) &&
11286                                     (MF_CFG_RD(bp, func_mf_config[func].
11287                                                mac_upper) != 0xffff) &&
11288                                     (SHMEM2_HAS(bp,
11289                                                 afex_driver_support))) {
11290                                         bp->mf_mode = MULTI_FUNCTION_AFEX;
11291                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11292                                                 func_mf_config[func].config);
11293                                 } else {
11294                                         BNX2X_DEV_INFO("cannot configure afex mode\n");
11295                                 }
11296                                 break;
11297                         case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
11298                                 /* get OV configuration */
11299                                 val = MF_CFG_RD(bp,
11300                                         func_mf_config[FUNC_0].e1hov_tag);
11301                                 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11302
11303                                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11304                                         bp->mf_mode = MULTI_FUNCTION_SD;
11305                                         bp->mf_config[vn] = MF_CFG_RD(bp,
11306                                                 func_mf_config[func].config);
11307                                 } else
11308                                         BNX2X_DEV_INFO("illegal OV for SD\n");
11309                                 break;
11310                         case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11311                                 bp->mf_config[vn] = 0;
11312                                 break;
11313                         default:
11314                                 /* Unknown configuration: reset mf_config */
11315                                 bp->mf_config[vn] = 0;
11316                                 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
11317                         }
11318                 }
11319
11320                 BNX2X_DEV_INFO("%s function mode\n",
11321                                IS_MF(bp) ? "multi" : "single");
11322
11323                 switch (bp->mf_mode) {
11324                 case MULTI_FUNCTION_SD:
11325                         val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11326                               FUNC_MF_CFG_E1HOV_TAG_MASK;
11327                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11328                                 bp->mf_ov = val;
11329                                 bp->path_has_ovlan = true;
11330
11331                                 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11332                                                func, bp->mf_ov, bp->mf_ov);
11333                         } else {
11334                                 dev_err(&bp->pdev->dev,
11335                                         "No valid MF OV for func %d, aborting\n",
11336                                         func);
11337                                 return -EPERM;
11338                         }
11339                         break;
11340                 case MULTI_FUNCTION_AFEX:
11341                         BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11342                         break;
11343                 case MULTI_FUNCTION_SI:
11344                         BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11345                                        func);
11346                         break;
11347                 default:
11348                         if (vn) {
11349                                 dev_err(&bp->pdev->dev,
11350                                         "VN %d is in a single function mode, aborting\n",
11351                                         vn);
11352                                 return -EPERM;
11353                         }
11354                         break;
11355                 }
11356
11357                 /* check if the other port on the path needs ovlan:
11358                  * since the MF configuration is shared between ports,
11359                  * the only possible mixed modes are
11360                  * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
11361                  */
11362                 if (CHIP_MODE_IS_4_PORT(bp) &&
11363                     !bp->path_has_ovlan &&
11364                     !IS_MF(bp) &&
11365                     bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11366                         u8 other_port = !BP_PORT(bp);
11367                         u8 other_func = BP_PATH(bp) + 2*other_port;
11368                         val = MF_CFG_RD(bp,
11369                                         func_mf_config[other_func].e1hov_tag);
11370                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11371                                 bp->path_has_ovlan = true;
11372                 }
11373         }
11374
11375         /* adjust igu_sb_cnt to MF for E1x */
11376         if (CHIP_IS_E1x(bp) && IS_MF(bp))
11377                 bp->igu_sb_cnt /= E1HVN_MAX;
11378
11379         /* port info */
11380         bnx2x_get_port_hwinfo(bp);
11381
11382         /* Get MAC addresses */
11383         bnx2x_get_mac_hwinfo(bp);
11384
11385         bnx2x_get_cnic_info(bp);
11386
11387         return rc;
11388 }
11389
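/**
 * bnx2x_read_fwinfo - extract the vendor version string from the PCI VPD.
 *
 * @bp: driver handle
 *
 * Parses the VPD read-only section and, when the manufacturer id
 * matches Dell, copies the vendor-specific field into bp->fw_ver.
 */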
11390 static void bnx2x_read_fwinfo(struct bnx2x *bp)
11391 {
11392         int cnt, i, block_end, rodi;
11393         char vpd_start[BNX2X_VPD_LEN+1];
11394         char str_id_reg[VENDOR_ID_LEN+1];
11395         char str_id_cap[VENDOR_ID_LEN+1];
11396         char *vpd_data;
11397         char *vpd_extended_data = NULL;
11398         u8 len;
11399
11400         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
11401         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11402
11403         if (cnt < BNX2X_VPD_LEN)
11404                 goto out_not_found;
11405
11406         /* The VPD RO tag should be the first tag after the identifier string,
11407          * hence we should be able to find it in the first BNX2X_VPD_LEN chars
11408          */
11409         i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
11410                              PCI_VPD_LRDT_RO_DATA);
11411         if (i < 0)
11412                 goto out_not_found;
11413
11414         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
11415                     pci_vpd_lrdt_size(&vpd_start[i]);
11416
11417         i += PCI_VPD_LRDT_TAG_SIZE;
11418
11419         if (block_end > BNX2X_VPD_LEN) {
11420                 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11421                 if (vpd_extended_data == NULL)
11422                         goto out_not_found;
11423
11424                 /* read rest of vpd image into vpd_extended_data */
11425                 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11426                 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11427                                    block_end - BNX2X_VPD_LEN,
11428                                    vpd_extended_data + BNX2X_VPD_LEN);
11429                 if (cnt < (block_end - BNX2X_VPD_LEN))
11430                         goto out_not_found;
11431                 vpd_data = vpd_extended_data;
11432         } else
11433                 vpd_data = vpd_start;
11434
11435         /* now vpd_data holds the full VPD content in both cases */
11436
11437         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11438                                    PCI_VPD_RO_KEYWORD_MFR_ID);
11439         if (rodi < 0)
11440                 goto out_not_found;
11441
11442         len = pci_vpd_info_field_size(&vpd_data[rodi]);
11443
11444         if (len != VENDOR_ID_LEN)
11445                 goto out_not_found;
11446
11447         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11448
11449         /* vendor specific info */
11450         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11451         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11452         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11453             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11454
11455                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11456                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
11457                 if (rodi >= 0) {
11458                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
11459
11460                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11461
11462                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11463                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11464                                 bp->fw_ver[len] = ' ';
11465                         }
11466                 }
11467                 kfree(vpd_extended_data);
11468                 return;
11469         }
11470 out_not_found:
11471         kfree(vpd_extended_data);
11472         return;
11473 }
11474
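/**
 * bnx2x_set_modes_bitmap - build the init mode flags.
 *
 * @bp: driver handle
 *
 * Encodes the chip type and revision, port mode, MF mode and host
 * endianness into the INIT_MODE_FLAGS bitmap used by the init code.
 */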
11475 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
11476 {
11477         u32 flags = 0;
11478
11479         if (CHIP_REV_IS_FPGA(bp))
11480                 SET_FLAGS(flags, MODE_FPGA);
11481         else if (CHIP_REV_IS_EMUL(bp))
11482                 SET_FLAGS(flags, MODE_EMUL);
11483         else
11484                 SET_FLAGS(flags, MODE_ASIC);
11485
11486         if (CHIP_MODE_IS_4_PORT(bp))
11487                 SET_FLAGS(flags, MODE_PORT4);
11488         else
11489                 SET_FLAGS(flags, MODE_PORT2);
11490
11491         if (CHIP_IS_E2(bp))
11492                 SET_FLAGS(flags, MODE_E2);
11493         else if (CHIP_IS_E3(bp)) {
11494                 SET_FLAGS(flags, MODE_E3);
11495                 if (CHIP_REV(bp) == CHIP_REV_Ax)
11496                         SET_FLAGS(flags, MODE_E3_A0);
11497                 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
11498                         SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
11499         }
11500
11501         if (IS_MF(bp)) {
11502                 SET_FLAGS(flags, MODE_MF);
11503                 switch (bp->mf_mode) {
11504                 case MULTI_FUNCTION_SD:
11505                         SET_FLAGS(flags, MODE_MF_SD);
11506                         break;
11507                 case MULTI_FUNCTION_SI:
11508                         SET_FLAGS(flags, MODE_MF_SI);
11509                         break;
11510                 case MULTI_FUNCTION_AFEX:
11511                         SET_FLAGS(flags, MODE_MF_AFEX);
11512                         break;
11513                 }
11514         } else
11515                 SET_FLAGS(flags, MODE_SF);
11516
11517 #if defined(__LITTLE_ENDIAN)
11518         SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
11519 #else /*(__BIG_ENDIAN)*/
11520         SET_FLAGS(flags, MODE_BIG_ENDIAN);
11521 #endif
11522         INIT_MODE_FLAGS(bp) = flags;
11523 }
11524
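/**
 * bnx2x_init_bp - one-time initialization of the driver state.
 *
 * @bp: driver handle
 *
 * Initializes the locks, work items and the periodic timer, reads the
 * HW info (PF only), unloads a previously loaded driver if needed and
 * sets the TPA, flow control, coalescing, DCBX and CoS defaults.
 */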
11525 static int bnx2x_init_bp(struct bnx2x *bp)
11526 {
11527         int func;
11528         int rc;
11529
11530         mutex_init(&bp->port.phy_mutex);
11531         mutex_init(&bp->fw_mb_mutex);
11532         spin_lock_init(&bp->stats_lock);
11533
11534         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
11535         INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
11536         INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
11537         if (IS_PF(bp)) {
11538                 rc = bnx2x_get_hwinfo(bp);
11539                 if (rc)
11540                         return rc;
11541         } else {
11542                 eth_zero_addr(bp->dev->dev_addr);
11543         }
11544
11545         bnx2x_set_modes_bitmap(bp);
11546
11547         rc = bnx2x_alloc_mem_bp(bp);
11548         if (rc)
11549                 return rc;
11550
11551         bnx2x_read_fwinfo(bp);
11552
11553         func = BP_FUNC(bp);
11554
11555         /* need to reset chip if undi was active */
11556         if (IS_PF(bp) && !BP_NOMCP(bp)) {
11557                 /* init fw_seq */
11558                 bp->fw_seq =
11559                         SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11560                                                         DRV_MSG_SEQ_NUMBER_MASK;
11561                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11562
11563                 bnx2x_prev_unload(bp);
11564         }
11565
11566         if (CHIP_REV_IS_FPGA(bp))
11567                 dev_err(&bp->pdev->dev, "FPGA detected\n");
11568
11569         if (BP_NOMCP(bp) && (func == 0))
11570                 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
11571
11572         bp->disable_tpa = disable_tpa;
11573         bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
11574
11575         /* Set TPA flags */
11576         if (bp->disable_tpa) {
11577                 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11578                 bp->dev->features &= ~NETIF_F_LRO;
11579         } else {
11580                 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
11581                 bp->dev->features |= NETIF_F_LRO;
11582         }
11583
11584         if (CHIP_IS_E1(bp))
11585                 bp->dropless_fc = 0;
11586         else
11587                 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
11588
11589         bp->mrrs = mrrs;
11590
11591         bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
11592         if (IS_VF(bp))
11593                 bp->rx_ring_size = MAX_RX_AVAIL;
11594
11595         /* make sure that the numbers are in the right granularity */
11596         bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11597         bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
11598
11599         bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
11600
11601         init_timer(&bp->timer);
11602         bp->timer.expires = jiffies + bp->current_interval;
11603         bp->timer.data = (unsigned long) bp;
11604         bp->timer.function = bnx2x_timer;
11605
11606         if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
11607             SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11608             SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11609             SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11610                 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11611                 bnx2x_dcbx_init_params(bp);
11612         } else {
11613                 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11614         }
11615
11616         if (CHIP_IS_E1x(bp))
11617                 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11618         else
11619                 bp->cnic_base_cl_id = FP_SB_MAX_E2;
11620
11621         /* multiple tx priority */
11622         if (IS_VF(bp))
11623                 bp->max_cos = 1;
11624         else if (CHIP_IS_E1x(bp))
11625                 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
11626         else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
11627                 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
11628         else if (CHIP_IS_E3B0(bp))
11629                 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
11630         else
11631                 BNX2X_ERR("unknown chip %x revision %x\n",
11632                           CHIP_NUM(bp), CHIP_REV(bp));
11633         BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
11634
11635         /* We need at least one default status block for slow-path events,
11636          * second status block for the L2 queue, and a third status block for
11637          * CNIC if supported.
11638          */
11639         if (CNIC_SUPPORT(bp))
11640                 bp->min_msix_vec_cnt = 3;
11641         else
11642                 bp->min_msix_vec_cnt = 2;
11643         BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d\n", bp->min_msix_vec_cnt);
11644
11645         return rc;
11646 }
11647
11648 /****************************************************************************
11649 * General service functions
11650 ****************************************************************************/
11651
11652 /*
11653  * net_device service functions
11654  */
11655
11656 /* called with rtnl_lock */
11657 static int bnx2x_open(struct net_device *dev)
11658 {
11659         struct bnx2x *bp = netdev_priv(dev);
11660         bool global = false;
11661         int other_engine = BP_PATH(bp) ? 0 : 1;
11662         bool other_load_status, load_status;
11663         int rc;
11664
11665         bp->stats_init = true;
11666
11667         netif_carrier_off(dev);
11668
11669         bnx2x_set_power_state(bp, PCI_D0);
11670
11671         /* If a parity error happened during the unload, then attentions
11672          * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
11673          * want the first function loaded on the current engine to
11674          * complete the recovery.
11675          * Parity recovery is only relevant for the PF driver.
11676          */
11677         if (IS_PF(bp)) {
11678                 other_load_status = bnx2x_get_load_status(bp, other_engine);
11679                 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11680                 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11681                     bnx2x_chk_parity_attn(bp, &global, true)) {
11682                         do {
11683                                 /* If there are attentions and they are in
11684                                  * global blocks, set the GLOBAL_RESET bit
11685                                  * regardless of whether it will be this
11686                                  * function that completes the recovery.
11687                                  */
11688                                 if (global)
11689                                         bnx2x_set_reset_global(bp);
11690
11691                                 /* Only the first function on the current
11692                                  * engine should try to recover in open. In case
11693                                  * of attentions in global blocks, only the
11694                                  * first function in the chip should try to recover.
11695                                  */
11696                                 if ((!load_status &&
11697                                      (!global || !other_load_status)) &&
11698                                       bnx2x_trylock_leader_lock(bp) &&
11699                                       !bnx2x_leader_reset(bp)) {
11700                                         netdev_info(bp->dev,
11701                                                     "Recovered in open\n");
11702                                         break;
11703                                 }
11704
11705                                 /* recovery has failed... */
11706                                 bnx2x_set_power_state(bp, PCI_D3hot);
11707                                 bp->recovery_state = BNX2X_RECOVERY_FAILED;
11708
11709                                 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11710                                           "If you still see this message after a few retries then a power cycle is required.\n");
11711
11712                                 return -EAGAIN;
11713                         } while (0);
11714                 }
11715         }
11716
11717         bp->recovery_state = BNX2X_RECOVERY_DONE;
11718         rc = bnx2x_nic_load(bp, LOAD_OPEN);
11719         if (rc)
11720                 return rc;
11721         return bnx2x_open_epilog(bp);
11722 }
11723
11724 /* called with rtnl_lock */
11725 static int bnx2x_close(struct net_device *dev)
11726 {
11727         struct bnx2x *bp = netdev_priv(dev);
11728
11729         /* Unload the driver, release IRQs */
11730         bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
11731
11732         return 0;
11733 }
11734
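/**
 * bnx2x_init_mcast_macs_list - build the multicast list for a ramrod.
 *
 * @bp: driver handle
 * @p: ramrod parameters whose mcast_list is initialized
 *
 * Allocates a single array for all the netdev multicast addresses and
 * links its elements into p->mcast_list.
 */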
11735 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11736                                       struct bnx2x_mcast_ramrod_params *p)
11737 {
11738         int mc_count = netdev_mc_count(bp->dev);
11739         struct bnx2x_mcast_list_elem *mc_mac =
11740                 kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
11741         struct netdev_hw_addr *ha;
11742
11743         if (!mc_mac)
11744                 return -ENOMEM;
11745
11746         INIT_LIST_HEAD(&p->mcast_list);
11747
11748         netdev_for_each_mc_addr(ha, bp->dev) {
11749                 mc_mac->mac = bnx2x_mc_addr(ha);
11750                 list_add_tail(&mc_mac->link, &p->mcast_list);
11751                 mc_mac++;
11752         }
11753
11754         p->mcast_list_len = mc_count;
11755
11756         return 0;
11757 }
11758
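/**
 * bnx2x_free_mcast_macs_list - free the list built by
 * bnx2x_init_mcast_macs_list().
 *
 * @p: ramrod parameters
 *
 * The elements were allocated as one array, so freeing the first entry
 * releases the whole list.
 */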
11759 static void bnx2x_free_mcast_macs_list(
11760         struct bnx2x_mcast_ramrod_params *p)
11761 {
11762         struct bnx2x_mcast_list_elem *mc_mac =
11763                 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11764                                  link);
11765
11766         WARN_ON(!mc_mac);
11767         kfree(mc_mac);
11768 }
11769
11770 /**
11771  * bnx2x_set_uc_list - configure a new unicast MACs list.
11772  *
11773  * @bp: driver handle
11774  *
11775  * We will use zero (0) as a MAC type for these MACs.
11776  */
11777 static int bnx2x_set_uc_list(struct bnx2x *bp)
11778 {
11779         int rc;
11780         struct net_device *dev = bp->dev;
11781         struct netdev_hw_addr *ha;
11782         struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
11783         unsigned long ramrod_flags = 0;
11784
11785         /* First, schedule a clean-up of the old configuration */
11786         rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11787         if (rc < 0) {
11788                 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11789                 return rc;
11790         }
11791
11792         netdev_for_each_uc_addr(ha, dev) {
11793                 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11794                                        BNX2X_UC_LIST_MAC, &ramrod_flags);
11795                 if (rc == -EEXIST) {
11796                         DP(BNX2X_MSG_SP,
11797                            "Failed to schedule ADD operations: %d\n", rc);
11798                         /* do not treat adding the same MAC as an error */
11799                         rc = 0;
11800
11801                 } else if (rc < 0) {
11802
11803                         BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11804                                   rc);
11805                         return rc;
11806                 }
11807         }
11808
11809         /* Execute the pending commands */
11810         __set_bit(RAMROD_CONT, &ramrod_flags);
11811         return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11812                                  BNX2X_UC_LIST_MAC, &ramrod_flags);
11813 }
11814
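/**
 * bnx2x_set_mc_list - configure a new multicast MACs list.
 *
 * @bp: driver handle
 *
 * Deletes the previously configured multicast MACs and then configures
 * the current netdev multicast list.
 */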
11815 static int bnx2x_set_mc_list(struct bnx2x *bp)
11816 {
11817         struct net_device *dev = bp->dev;
11818         struct bnx2x_mcast_ramrod_params rparam = {NULL};
11819         int rc = 0;
11820
11821         rparam.mcast_obj = &bp->mcast_obj;
11822
11823         /* first, clear all configured multicast MACs */
11824         rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11825         if (rc < 0) {
11826                 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
11827                 return rc;
11828         }
11829
11830         /* then, configure a new MACs list */
11831         if (netdev_mc_count(dev)) {
11832                 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11833                 if (rc) {
11834                         BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11835                                   rc);
11836                         return rc;
11837                 }
11838
11839                 /* Now add the new MACs */
11840                 rc = bnx2x_config_mcast(bp, &rparam,
11841                                         BNX2X_MCAST_CMD_ADD);
11842                 if (rc < 0)
11843                         BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11844                                   rc);
11845
11846                 bnx2x_free_mcast_macs_list(&rparam);
11847         }
11848
11849         return rc;
11850 }
11851
11852 /* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
11853 void bnx2x_set_rx_mode(struct net_device *dev)
11854 {
11855         struct bnx2x *bp = netdev_priv(dev);
11856         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11857
11858         if (bp->state != BNX2X_STATE_OPEN) {
11859                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11860                 return;
11861         }
11862
11863         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
11864
11865         if (dev->flags & IFF_PROMISC)
11866                 rx_mode = BNX2X_RX_MODE_PROMISC;
11867         else if ((dev->flags & IFF_ALLMULTI) ||
11868                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11869                   CHIP_IS_E1(bp)))
11870                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11871         else {
11872                 if (IS_PF(bp)) {
11873                         /* some multicasts */
11874                         if (bnx2x_set_mc_list(bp) < 0)
11875                                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11876
11877                         if (bnx2x_set_uc_list(bp) < 0)
11878                                 rx_mode = BNX2X_RX_MODE_PROMISC;
11879                 } else {
11880                         /* Configuring multicast for a VF involves sleeping
11881                          * (while we wait for the PF's response). Since this
11882                          * function is called from a non-sleepable context, we
11883                          * must schedule a work item for this purpose.
11884                          */
11885                         smp_mb__before_clear_bit();
11886                         set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
11887                                 &bp->sp_rtnl_state);
11888                         smp_mb__after_clear_bit();
11889                         schedule_delayed_work(&bp->sp_rtnl_task, 0);
11890                 }
11891         }
11892
11893         bp->rx_mode = rx_mode;
11894         /* handle ISCSI SD mode */
11895         if (IS_MF_ISCSI_SD(bp))
11896                 bp->rx_mode = BNX2X_RX_MODE_NONE;
11897
11898         /* Schedule the rx_mode command */
11899         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11900                 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11901                 return;
11902         }
11903
11904         if (IS_PF(bp)) {
11905                 bnx2x_set_storm_rx_mode(bp);
11906         } else {
11907                 /* Configuring the rx mode in the storms for a VF involves
11908                  * sleeping (while we wait for the PF's response). Since this
11909                  * function is called from a non-sleepable context, we must
11910                  * schedule a work item for this purpose.
11911                  */
11912                 smp_mb__before_clear_bit();
11913                 set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
11914                         &bp->sp_rtnl_state);
11915                 smp_mb__after_clear_bit();
11916                 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11917         }
11918 }
11919
11920 /* called with rtnl_lock */
11921 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11922                            int devad, u16 addr)
11923 {
11924         struct bnx2x *bp = netdev_priv(netdev);
11925         u16 value;
11926         int rc;
11927
11928         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11929            prtad, devad, addr);
11930
11931         /* The HW expects different devad if CL22 is used */
11932         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11933
11934         bnx2x_acquire_phy_lock(bp);
11935         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
11936         bnx2x_release_phy_lock(bp);
11937         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
11938
11939         if (!rc)
11940                 rc = value;
11941         return rc;
11942 }
11943
11944 /* called with rtnl_lock */
11945 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11946                             u16 addr, u16 value)
11947 {
11948         struct bnx2x *bp = netdev_priv(netdev);
11949         int rc;
11950
11951         DP(NETIF_MSG_LINK,
11952            "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
11953            prtad, devad, addr, value);
11954
11955         /* The HW expects different devad if CL22 is used */
11956         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
11957
11958         bnx2x_acquire_phy_lock(bp);
11959         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
11960         bnx2x_release_phy_lock(bp);
11961         return rc;
11962 }
11963
11964 /* called with rtnl_lock */
11965 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11966 {
11967         struct bnx2x *bp = netdev_priv(dev);
11968         struct mii_ioctl_data *mdio = if_mii(ifr);
11969
11970         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11971            mdio->phy_id, mdio->reg_num, mdio->val_in);
11972
11973         if (!netif_running(dev))
11974                 return -EAGAIN;
11975
11976         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
11977 }
11978
11979 #ifdef CONFIG_NET_POLL_CONTROLLER
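/* Netpoll (.ndo_poll_controller) helper: kick NAPI on every ethernet queue so
 * pending completions are processed even when interrupts are unavailable.
 */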
11980 static void poll_bnx2x(struct net_device *dev)
11981 {
11982         struct bnx2x *bp = netdev_priv(dev);
11983         int i;
11984
11985         for_each_eth_queue(bp, i) {
11986                 struct bnx2x_fastpath *fp = &bp->fp[i];
11987                 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11988         }
11989 }
11990 #endif
11991
11992 static int bnx2x_validate_addr(struct net_device *dev)
11993 {
11994         struct bnx2x *bp = netdev_priv(dev);
11995
11996         /* query the bulletin board for mac address configured by the PF */
11997         if (IS_VF(bp))
11998                 bnx2x_sample_bulletin(bp);
11999
12000         if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
12001                 BNX2X_ERR("Non-valid Ethernet address\n");
12002                 return -EADDRNOTAVAIL;
12003         }
12004         return 0;
12005 }
12006
12007 static const struct net_device_ops bnx2x_netdev_ops = {
12008         .ndo_open               = bnx2x_open,
12009         .ndo_stop               = bnx2x_close,
12010         .ndo_start_xmit         = bnx2x_start_xmit,
12011         .ndo_select_queue       = bnx2x_select_queue,
12012         .ndo_set_rx_mode        = bnx2x_set_rx_mode,
12013         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12014         .ndo_validate_addr      = bnx2x_validate_addr,
12015         .ndo_do_ioctl           = bnx2x_ioctl,
12016         .ndo_change_mtu         = bnx2x_change_mtu,
12017         .ndo_fix_features       = bnx2x_fix_features,
12018         .ndo_set_features       = bnx2x_set_features,
12019         .ndo_tx_timeout         = bnx2x_tx_timeout,
12020 #ifdef CONFIG_NET_POLL_CONTROLLER
12021         .ndo_poll_controller    = poll_bnx2x,
12022 #endif
12023         .ndo_setup_tc           = bnx2x_setup_tc,
12024 #ifdef CONFIG_BNX2X_SRIOV
12025         .ndo_set_vf_mac         = bnx2x_set_vf_mac,
12026         .ndo_set_vf_vlan        = bnx2x_set_vf_vlan,
12027         .ndo_get_vf_config      = bnx2x_get_vf_config,
12028 #endif
12029 #ifdef NETDEV_FCOE_WWNN
12030         .ndo_fcoe_get_wwn       = bnx2x_fcoe_get_wwn,
12031 #endif
12032
12033 #ifdef CONFIG_NET_LL_RX_POLL
12034         .ndo_ll_poll            = bnx2x_low_latency_recv,
12035 #endif
12036 };
12037
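/* Prefer a 64-bit DMA mask (marking the device as using DAC); fall back to a
 * 32-bit mask, and fail the probe if neither streaming nor coherent masks can
 * be set.
 */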
12038 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
12039 {
12040         struct device *dev = &bp->pdev->dev;
12041
12042         if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
12043                 bp->flags |= USING_DAC_FLAG;
12044                 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
12045                         dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
12046                         return -EIO;
12047                 }
12048         } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
12049                 dev_err(dev, "System does not support DMA, aborting\n");
12050                 return -EIO;
12051         }
12052
12053         return 0;
12054 }
12055
12056 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
12057                           struct net_device *dev, unsigned long board_type)
12058 {
12059         int rc;
12060         u32 pci_cfg_dword;
12061         bool chip_is_e1x = (board_type == BCM57710 ||
12062                             board_type == BCM57711 ||
12063                             board_type == BCM57711E);
12064
12065         SET_NETDEV_DEV(dev, &pdev->dev);
12066
12067         bp->dev = dev;
12068         bp->pdev = pdev;
12069
12070         rc = pci_enable_device(pdev);
12071         if (rc) {
12072                 dev_err(&bp->pdev->dev,
12073                         "Cannot enable PCI device, aborting\n");
12074                 goto err_out;
12075         }
12076
12077         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12078                 dev_err(&bp->pdev->dev,
12079                         "Cannot find PCI device base address, aborting\n");
12080                 rc = -ENODEV;
12081                 goto err_out_disable;
12082         }
12083
12084         if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12085                 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
12086                 rc = -ENODEV;
12087                 goto err_out_disable;
12088         }
12089
12090         pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12091         if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12092             PCICFG_REVESION_ID_ERROR_VAL) {
12093                 pr_err("PCI device error, probably due to fan failure, aborting\n");
12094                 rc = -ENODEV;
12095                 goto err_out_disable;
12096         }
12097
12098         if (atomic_read(&pdev->enable_cnt) == 1) {
12099                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12100                 if (rc) {
12101                         dev_err(&bp->pdev->dev,
12102                                 "Cannot obtain PCI resources, aborting\n");
12103                         goto err_out_disable;
12104                 }
12105
12106                 pci_set_master(pdev);
12107                 pci_save_state(pdev);
12108         }
12109
12110         if (IS_PF(bp)) {
12111                 bp->pm_cap = pdev->pm_cap;
12112                 if (bp->pm_cap == 0) {
12113                         dev_err(&bp->pdev->dev,
12114                                 "Cannot find power management capability, aborting\n");
12115                         rc = -EIO;
12116                         goto err_out_release;
12117                 }
12118         }
12119
12120         if (!pci_is_pcie(pdev)) {
12121                 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
12122                 rc = -EIO;
12123                 goto err_out_release;
12124         }
12125
12126         rc = bnx2x_set_coherency_mask(bp);
12127         if (rc)
12128                 goto err_out_release;
12129
12130         dev->mem_start = pci_resource_start(pdev, 0);
12131         dev->base_addr = dev->mem_start;
12132         dev->mem_end = pci_resource_end(pdev, 0);
12133
12134         dev->irq = pdev->irq;
12135
12136         bp->regview = pci_ioremap_bar(pdev, 0);
12137         if (!bp->regview) {
12138                 dev_err(&bp->pdev->dev,
12139                         "Cannot map register space, aborting\n");
12140                 rc = -ENOMEM;
12141                 goto err_out_release;
12142         }
12143
12144         /* In E1/E1H use the PCI device function given by the kernel.
12145          * In E2/E3 read the physical function from the ME register, since
12146          * these chips support Physical Device Assignment where the kernel
12147          * BDF may be arbitrary (depending on the hypervisor).
12148          */
12149         if (chip_is_e1x) {
12150                 bp->pf_num = PCI_FUNC(pdev->devfn);
12151         } else {
12152                 /* chip is E2/E3 */
12153                 pci_read_config_dword(bp->pdev,
12154                                       PCICFG_ME_REGISTER, &pci_cfg_dword);
12155                 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
12156                                   ME_REG_ABS_PF_NUM_SHIFT);
12157         }
12158         BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
12159
12160         /* clean indirect addresses */
12161         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12162                                PCICFG_VENDOR_ID_OFFSET);
12163         /*
12164          * Clean the following indirect addresses for all functions since they
12165          * are not used by the driver.
12166          */
12167         if (IS_PF(bp)) {
12168                 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12169                 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12170                 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12171                 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12172
12173                 if (chip_is_e1x) {
12174                         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12175                         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12176                         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12177                         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12178                 }
12179
12180                 /* Enable internal target-read (in case we are probed after PF
12181                  * FLR). Must be done prior to any BAR read access. Only for
12182                  * 57712 and up
12183                  */
12184                 if (!chip_is_e1x)
12185                         REG_WR(bp,
12186                                PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
12187         }
12188
12189         dev->watchdog_timeo = TX_TIMEOUT;
12190
12191         dev->netdev_ops = &bnx2x_netdev_ops;
12192         bnx2x_set_ethtool_ops(bp, dev);
12193
12194         dev->priv_flags |= IFF_UNICAST_FLT;
12195
12196         dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12197                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12198                 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
12199                 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
12200         if (!CHIP_IS_E1x(bp)) {
12201                 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12202                 dev->hw_enc_features =
12203                         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12204                         NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12205                         NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
12206         }
12207
12208         dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12209                 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12210
12211         dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
12212         if (bp->flags & USING_DAC_FLAG)
12213                 dev->features |= NETIF_F_HIGHDMA;
12214
12215         /* Add Loopback capability to the device */
12216         dev->hw_features |= NETIF_F_LOOPBACK;
12217
12218 #ifdef BCM_DCBNL
12219         dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12220 #endif
12221
12222         /* get_port_hwinfo() will set prtad and mmds properly */
12223         bp->mdio.prtad = MDIO_PRTAD_NONE;
12224         bp->mdio.mmds = 0;
12225         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12226         bp->mdio.dev = dev;
12227         bp->mdio.mdio_read = bnx2x_mdio_read;
12228         bp->mdio.mdio_write = bnx2x_mdio_write;
12229
12230         return 0;
12231
12232 err_out_release:
12233         if (atomic_read(&pdev->enable_cnt) == 1)
12234                 pci_release_regions(pdev);
12235
12236 err_out_disable:
12237         pci_disable_device(pdev);
12238         pci_set_drvdata(pdev, NULL);
12239
12240 err_out:
12241         return rc;
12242 }
12243
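/* Decode the negotiated PCIe link width and generation (2.5/5.0/8.0 GT/s)
 * from the PCICFG_LINK_CONTROL word in PCI configuration space.
 */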
12244 static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
12245                                        enum bnx2x_pci_bus_speed *speed)
12246 {
12247         u32 link_speed, val = 0;
12248
12249         pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
12250         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
12251
12252         link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12253
12254         switch (link_speed) {
12255         case 3:
12256                 *speed = BNX2X_PCI_LINK_SPEED_8000;
12257                 break;
12258         case 2:
12259                 *speed = BNX2X_PCI_LINK_SPEED_5000;
12260                 break;
12261         default:
12262                 *speed = BNX2X_PCI_LINK_SPEED_2500;
12263         }
12264 }
12265
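/* Sanity-check a loaded firmware file: every section must fit inside the
 * file, the init_ops offsets must stay in range, and the embedded version
 * must match the one this driver was built against.
 */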
12266 static int bnx2x_check_firmware(struct bnx2x *bp)
12267 {
12268         const struct firmware *firmware = bp->firmware;
12269         struct bnx2x_fw_file_hdr *fw_hdr;
12270         struct bnx2x_fw_file_section *sections;
12271         u32 offset, len, num_ops;
12272         __be16 *ops_offsets;
12273         int i;
12274         const u8 *fw_ver;
12275
12276         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12277                 BNX2X_ERR("Wrong FW size\n");
12278                 return -EINVAL;
12279         }
12280
12281         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12282         sections = (struct bnx2x_fw_file_section *)fw_hdr;
12283
12284         /* Make sure none of the offsets and sizes make us read beyond
12285          * the end of the firmware data */
12286         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12287                 offset = be32_to_cpu(sections[i].offset);
12288                 len = be32_to_cpu(sections[i].len);
12289                 if (offset + len > firmware->size) {
12290                         BNX2X_ERR("Section %d length is out of bounds\n", i);
12291                         return -EINVAL;
12292                 }
12293         }
12294
12295         /* Likewise for the init_ops offsets */
12296         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
12297         ops_offsets = (__force __be16 *)(firmware->data + offset);
12298         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12299
12300         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12301                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
12302                         BNX2X_ERR("Section offset %d is out of bounds\n", i);
12303                         return -EINVAL;
12304                 }
12305         }
12306
12307         /* Check FW version */
12308         offset = be32_to_cpu(fw_hdr->fw_version.offset);
12309         fw_ver = firmware->data + offset;
12310         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12311             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12312             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12313             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
12314                 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12315                        fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12316                        BCM_5710_FW_MAJOR_VERSION,
12317                        BCM_5710_FW_MINOR_VERSION,
12318                        BCM_5710_FW_REVISION_VERSION,
12319                        BCM_5710_FW_ENGINEERING_VERSION);
12320                 return -EINVAL;
12321         }
12322
12323         return 0;
12324 }
12325
12326 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12327 {
12328         const __be32 *source = (const __be32 *)_source;
12329         u32 *target = (u32 *)_target;
12330         u32 i;
12331
12332         for (i = 0; i < n/4; i++)
12333                 target[i] = be32_to_cpu(source[i]);
12334 }
12335
12336 /*
12337    Ops array is stored in the following format:
12338    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12339  */
12340 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12341 {
12342         const __be32 *source = (const __be32 *)_source;
12343         struct raw_op *target = (struct raw_op *)_target;
12344         u32 i, j, tmp;
12345
12346         for (i = 0, j = 0; i < n/8; i++, j += 2) {
12347                 tmp = be32_to_cpu(source[j]);
12348                 target[i].op = (tmp >> 24) & 0xff;
12349                 target[i].offset = tmp & 0xffffff;
12350                 target[i].raw_data = be32_to_cpu(source[j + 1]);
12351         }
12352 }
12353
12354 /* IRO array is stored in the following format:
12355  * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
12356  */
12357 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
12358 {
12359         const __be32 *source = (const __be32 *)_source;
12360         struct iro *target = (struct iro *)_target;
12361         u32 i, j, tmp;
12362
12363         for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12364                 target[i].base = be32_to_cpu(source[j]);
12365                 j++;
12366                 tmp = be32_to_cpu(source[j]);
12367                 target[i].m1 = (tmp >> 16) & 0xffff;
12368                 target[i].m2 = tmp & 0xffff;
12369                 j++;
12370                 tmp = be32_to_cpu(source[j]);
12371                 target[i].m3 = (tmp >> 16) & 0xffff;
12372                 target[i].size = tmp & 0xffff;
12373                 j++;
12374         }
12375 }
12376
12377 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
12378 {
12379         const __be16 *source = (const __be16 *)_source;
12380         u16 *target = (u16 *)_target;
12381         u32 i;
12382
12383         for (i = 0; i < n/2; i++)
12384                 target[i] = be16_to_cpu(source[i]);
12385 }
12386
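/* Allocate bp->arr for the named firmware file section and fill it with
 * 'func', which converts the section data from big endian; jump to 'lbl' on
 * allocation failure. Relies on 'bp' and 'fw_hdr' in the calling scope.
 */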
12387 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
12388 do {                                                                    \
12389         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
12390         bp->arr = kmalloc(len, GFP_KERNEL);                             \
12391         if (!bp->arr)                                                   \
12392                 goto lbl;                                               \
12393         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
12394              (u8 *)bp->arr, len);                                       \
12395 } while (0)
12396
12397 static int bnx2x_init_firmware(struct bnx2x *bp)
12398 {
12399         const char *fw_file_name;
12400         struct bnx2x_fw_file_hdr *fw_hdr;
12401         int rc;
12402
12403         if (bp->firmware)
12404                 return 0;
12405
12406         if (CHIP_IS_E1(bp))
12407                 fw_file_name = FW_FILE_NAME_E1;
12408         else if (CHIP_IS_E1H(bp))
12409                 fw_file_name = FW_FILE_NAME_E1H;
12410         else if (!CHIP_IS_E1x(bp))
12411                 fw_file_name = FW_FILE_NAME_E2;
12412         else {
12413                 BNX2X_ERR("Unsupported chip revision\n");
12414                 return -EINVAL;
12415         }
12416         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
12417
12418         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
12419         if (rc) {
12420                 BNX2X_ERR("Can't load firmware file %s\n",
12421                           fw_file_name);
12422                 goto request_firmware_exit;
12423         }
12424
12425         rc = bnx2x_check_firmware(bp);
12426         if (rc) {
12427                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
12428                 goto request_firmware_exit;
12429         }
12430
12431         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12432
12433         /* Initialize the pointers to the init arrays */
12434         /* Blob */
12435         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12436
12437         /* Opcodes */
12438         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12439
12440         /* Offsets */
12441         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12442                             be16_to_cpu_n);
12443
12444         /* STORMs firmware */
12445         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12446                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12447         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
12448                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12449         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12450                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12451         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
12452                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
12453         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12454                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12455         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
12456                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12457         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12458                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12459         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
12460                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
12461         /* IRO */
12462         BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
12463
12464         return 0;
12465
12466 iro_alloc_err:
12467         kfree(bp->init_ops_offsets);
12468 init_offsets_alloc_err:
12469         kfree(bp->init_ops);
12470 init_ops_alloc_err:
12471         kfree(bp->init_data);
12472 request_firmware_exit:
12473         release_firmware(bp->firmware);
12474         bp->firmware = NULL;
12475
12476         return rc;
12477 }
12478
12479 static void bnx2x_release_firmware(struct bnx2x *bp)
12480 {
12481         kfree(bp->init_ops_offsets);
12482         kfree(bp->init_ops);
12483         kfree(bp->init_data);
12484         release_firmware(bp->firmware);
12485         bp->firmware = NULL;
12486 }
12487
12488 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12489         .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12490         .init_hw_cmn      = bnx2x_init_hw_common,
12491         .init_hw_port     = bnx2x_init_hw_port,
12492         .init_hw_func     = bnx2x_init_hw_func,
12493
12494         .reset_hw_cmn     = bnx2x_reset_common,
12495         .reset_hw_port    = bnx2x_reset_port,
12496         .reset_hw_func    = bnx2x_reset_func,
12497
12498         .gunzip_init      = bnx2x_gunzip_init,
12499         .gunzip_end       = bnx2x_gunzip_end,
12500
12501         .init_fw          = bnx2x_init_firmware,
12502         .release_fw       = bnx2x_release_firmware,
12503 };
12504
12505 void bnx2x__init_func_obj(struct bnx2x *bp)
12506 {
12507         /* Prepare DMAE related driver resources */
12508         bnx2x_setup_dmae(bp);
12509
12510         bnx2x_init_func_obj(bp, &bp->func_obj,
12511                             bnx2x_sp(bp, func_rdata),
12512                             bnx2x_sp_mapping(bp, func_rdata),
12513                             bnx2x_sp(bp, func_afex_rdata),
12514                             bnx2x_sp_mapping(bp, func_afex_rdata),
12515                             &bnx2x_func_sp_drv);
12516 }
12517
12518 /* must be called after sriov-enable */
12519 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
12520 {
12521         int cid_count = BNX2X_L2_MAX_CID(bp);
12522
12523         if (IS_SRIOV(bp))
12524                 cid_count += BNX2X_VF_CIDS;
12525
12526         if (CNIC_SUPPORT(bp))
12527                 cid_count += CNIC_CID_MAX;
12528
12529         return roundup(cid_count, QM_CID_ROUND);
12530 }
12531
12532 /**
12533  * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
12534  *
12535  * @pdev:       pci device
12536  *
12537  */
12538 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
12539                                      int cnic_cnt, bool is_vf)
12540 {
12541         int pos, index;
12542         u16 control = 0;
12543
12544         pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
12545
12546         /*
12547          * If MSI-X is not supported - return the number of SBs needed to
12548          * support one fast path queue: one FP queue + an SB for CNIC
12549          */
12550         if (!pos) {
12551                 dev_info(&pdev->dev, "no msix capability found\n");
12552                 return 1 + cnic_cnt;
12553         }
12554         dev_info(&pdev->dev, "msix capability found\n");
12555
12556         /*
12557          * The value in the PCI configuration space is the index of the last
12558          * entry, namely one less than the actual size of the table, which is
12559          * exactly what we want to return from this function: the number of all
12560          * SBs without the default SB.
12561          * For VFs there is no default SB, so we return (index + 1).
12562          */
12563         pci_read_config_word(pdev, pos  + PCI_MSI_FLAGS, &control);
12564
12565         index = control & PCI_MSIX_FLAGS_QSIZE;
12566
12567         return is_vf ? index + 1 : index;
12568 }
12569
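/* Rough per-chip-family upper bound on the number of CoS Tx queues; the
 * exact value is computed later when bp->max_cos is initialized.
 */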
12570 static int set_max_cos_est(int chip_id)
12571 {
12572         switch (chip_id) {
12573         case BCM57710:
12574         case BCM57711:
12575         case BCM57711E:
12576                 return BNX2X_MULTI_TX_COS_E1X;
12577         case BCM57712:
12578         case BCM57712_MF:
12579         case BCM57712_VF:
12580                 return BNX2X_MULTI_TX_COS_E2_E3A0;
12581         case BCM57800:
12582         case BCM57800_MF:
12583         case BCM57800_VF:
12584         case BCM57810:
12585         case BCM57810_MF:
12586         case BCM57840_4_10:
12587         case BCM57840_2_20:
12588         case BCM57840_O:
12589         case BCM57840_MFO:
12590         case BCM57810_VF:
12591         case BCM57840_MF:
12592         case BCM57840_VF:
12593         case BCM57811:
12594         case BCM57811_MF:
12595         case BCM57811_VF:
12596                 return BNX2X_MULTI_TX_COS_E3B0;
12598         default:
12599                 pr_err("Unknown board_type (%d), aborting\n", chip_id);
12600                 return -ENODEV;
12601         }
12602 }
12603
12604 static int set_is_vf(int chip_id)
12605 {
12606         switch (chip_id) {
12607         case BCM57712_VF:
12608         case BCM57800_VF:
12609         case BCM57810_VF:
12610         case BCM57840_VF:
12611         case BCM57811_VF:
12612                 return true;
12613         default:
12614                 return false;
12615         }
12616 }
12617
12618 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
12619
12620 static int bnx2x_init_one(struct pci_dev *pdev,
12621                                     const struct pci_device_id *ent)
12622 {
12623         struct net_device *dev = NULL;
12624         struct bnx2x *bp;
12625         int pcie_width;
12626         enum bnx2x_pci_bus_speed pcie_speed;
12627         int rc, max_non_def_sbs;
12628         int rx_count, tx_count, rss_count, doorbell_size;
12629         int max_cos_est;
12630         bool is_vf;
12631         int cnic_cnt;
12632
12633         /* An estimated maximum supported CoS number according to the chip
12634          * version.
12635          * We roughly estimate the maximum number of CoSes this chip may
12636          * support in order to minimize the memory allocated for Tx
12637          * netdev_queue's. This number is calculated accurately when
12638          * bp->max_cos is initialized in bnx2x_init_bp(), based on the chip
12639          * version and chip revision.
12640          */
12641         max_cos_est = set_max_cos_est(ent->driver_data);
12642         if (max_cos_est < 0)
12643                 return max_cos_est;
12644         is_vf = set_is_vf(ent->driver_data);
12645         cnic_cnt = is_vf ? 0 : 1;
12646
12647         max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
12648
12649         /* Maximum number of RSS queues: one IGU SB goes to CNIC */
12650         rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
12651
12652         if (rss_count < 1)
12653                 return -EINVAL;
12654
12655         /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
12656         rx_count = rss_count + cnic_cnt;
12657
12658         /* Maximum number of netdev Tx queues:
12659          * Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
12660          */
12661         tx_count = rss_count * max_cos_est + cnic_cnt;
12662
12663         /* dev zeroed in init_etherdev */
12664         dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
12665         if (!dev)
12666                 return -ENOMEM;
12667
12668         bp = netdev_priv(dev);
12669
12670         bp->flags = 0;
12671         if (is_vf)
12672                 bp->flags |= IS_VF_FLAG;
12673
12674         bp->igu_sb_cnt = max_non_def_sbs;
12675         bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
12676         bp->msg_enable = debug;
12677         bp->cnic_support = cnic_cnt;
12678         bp->cnic_probe = bnx2x_cnic_probe;
12679
12680         pci_set_drvdata(pdev, dev);
12681
12682         rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
12683         if (rc < 0) {
12684                 free_netdev(dev);
12685                 return rc;
12686         }
12687
12688         BNX2X_DEV_INFO("This is a %s function\n",
12689                        IS_PF(bp) ? "physical" : "virtual");
12690         BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
12691         BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
12692         BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
12693                        tx_count, rx_count);
12694
12695         rc = bnx2x_init_bp(bp);
12696         if (rc)
12697                 goto init_one_exit;
12698
12699         /* Map doorbells here as we need the real value of bp->max_cos which
12700          * is initialized in bnx2x_init_bp() to determine the number of
12701          * l2 connections.
12702          */
12703         if (IS_VF(bp)) {
12704                 bp->doorbells = bnx2x_vf_doorbells(bp);
12705                 rc = bnx2x_vf_pci_alloc(bp);
12706                 if (rc)
12707                         goto init_one_exit;
12708         } else {
12709                 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
12710                 if (doorbell_size > pci_resource_len(pdev, 2)) {
12711                         dev_err(&bp->pdev->dev,
12712                                 "Cannot map doorbells, bar size too small, aborting\n");
12713                         rc = -ENOMEM;
12714                         goto init_one_exit;
12715                 }
12716                 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12717                                                 doorbell_size);
12718         }
12719         if (!bp->doorbells) {
12720                 dev_err(&bp->pdev->dev,
12721                         "Cannot map doorbell space, aborting\n");
12722                 rc = -ENOMEM;
12723                 goto init_one_exit;
12724         }
12725
12726         if (IS_VF(bp)) {
12727                 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
12728                 if (rc)
12729                         goto init_one_exit;
12730         }
12731
12732         /* Enable SRIOV if capability found in configuration space */
12733         rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
12734         if (rc)
12735                 goto init_one_exit;
12736
12737         /* calc qm_cid_count */
12738         bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
12739         BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
12740
12741         /* disable FCOE L2 queue for E1x */
12742         if (CHIP_IS_E1x(bp))
12743                 bp->flags |= NO_FCOE_FLAG;
12744
12745         /* Set bp->num_queues for MSI-X mode */
12746         bnx2x_set_num_queues(bp);
12747
12748         /* Configure interrupt mode: try to enable MSI-X/MSI if
12749          * needed.
12750          */
12751         rc = bnx2x_set_int_mode(bp);
12752         if (rc) {
12753                 dev_err(&pdev->dev, "Cannot set interrupts\n");
12754                 goto init_one_exit;
12755         }
12756         BNX2X_DEV_INFO("set interrupts successfully\n");
12757
12758         /* register the net device */
12759         rc = register_netdev(dev);
12760         if (rc) {
12761                 dev_err(&pdev->dev, "Cannot register net device\n");
12762                 goto init_one_exit;
12763         }
12764         BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
12765
12766         if (!NO_FCOE(bp)) {
12767                 /* Add storage MAC address */
12768                 rtnl_lock();
12769                 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12770                 rtnl_unlock();
12771         }
12772
12773         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12774         BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
12775                        pcie_width, pcie_speed);
12776
12777         BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12778                        board_info[ent->driver_data].name,
12779                        (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12780                        pcie_width,
12781                        pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
12782                        pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
12783                        pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
12784                        "Unknown",
12785                        dev->base_addr, bp->pdev->irq, dev->dev_addr);
12786
12787         return 0;
12788
12789 init_one_exit:
12790         if (bp->regview)
12791                 iounmap(bp->regview);
12792
12793         if (IS_PF(bp) && bp->doorbells)
12794                 iounmap(bp->doorbells);
12795
12796         free_netdev(dev);
12797
12798         if (atomic_read(&pdev->enable_cnt) == 1)
12799                 pci_release_regions(pdev);
12800
12801         pci_disable_device(pdev);
12802         pci_set_drvdata(pdev, NULL);
12803
12804         return rc;
12805 }
12806
12807 static void __bnx2x_remove(struct pci_dev *pdev,
12808                            struct net_device *dev,
12809                            struct bnx2x *bp,
12810                            bool remove_netdev)
12811 {
12812         /* Delete storage MAC address */
12813         if (!NO_FCOE(bp)) {
12814                 rtnl_lock();
12815                 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12816                 rtnl_unlock();
12817         }
12818
12819 #ifdef BCM_DCBNL
12820         /* Delete app tlvs from dcbnl */
12821         bnx2x_dcbnl_update_applist(bp, true);
12822 #endif
12823
12824         /* Close the interface - either directly or implicitly */
12825         if (remove_netdev) {
12826                 unregister_netdev(dev);
12827         } else {
12828                 rtnl_lock();
12829                 if (netif_running(dev))
12830                         bnx2x_close(dev);
12831                 rtnl_unlock();
12832         }
12833
12834         bnx2x_iov_remove_one(bp);
12835
12836         /* Power on: we can't let PCI layer write to us while we are in D3 */
12837         if (IS_PF(bp))
12838                 bnx2x_set_power_state(bp, PCI_D0);
12839
12840         /* Disable MSI/MSI-X */
12841         bnx2x_disable_msi(bp);
12842
12843         /* Power off */
12844         if (IS_PF(bp))
12845                 bnx2x_set_power_state(bp, PCI_D3hot);
12846
12847         /* Make sure RESET task is not scheduled before continuing */
12848         cancel_delayed_work_sync(&bp->sp_rtnl_task);
12849
12850         /* send message via vfpf channel to release the resources of this vf */
12851         if (IS_VF(bp))
12852                 bnx2x_vfpf_release(bp);
12853
12854         /* Assumes no further PCIe PM changes will occur */
12855         if (system_state == SYSTEM_POWER_OFF) {
12856                 pci_wake_from_d3(pdev, bp->wol);
12857                 pci_set_power_state(pdev, PCI_D3hot);
12858         }
12859
12860         if (bp->regview)
12861                 iounmap(bp->regview);
12862
12863         /* For a VF the doorbells are part of the regview and were unmapped
12864          * along with it. FW is only loaded by the PF.
12865          */
12866         if (IS_PF(bp)) {
12867                 if (bp->doorbells)
12868                         iounmap(bp->doorbells);
12869
12870                 bnx2x_release_firmware(bp);
12871         }
12872         bnx2x_free_mem_bp(bp);
12873
12874         if (remove_netdev)
12875                 free_netdev(dev);
12876
12877         if (atomic_read(&pdev->enable_cnt) == 1)
12878                 pci_release_regions(pdev);
12879
12880         pci_disable_device(pdev);
12881         pci_set_drvdata(pdev, NULL);
12882 }
12883
12884 static void bnx2x_remove_one(struct pci_dev *pdev)
12885 {
12886         struct net_device *dev = pci_get_drvdata(pdev);
12887         struct bnx2x *bp;
12888
12889         if (!dev) {
12890                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12891                 return;
12892         }
12893         bp = netdev_priv(dev);
12894
12895         __bnx2x_remove(pdev, dev, bp, true);
12896 }
12897
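/* Minimal unload used from the PCI error handler: stop CNIC and the Tx path,
 * drop NAPI objects and timers, freeze statistics and mark the carrier off in
 * preparation for a slot reset.
 */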
12898 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12899 {
12900         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
12901
12902         bp->rx_mode = BNX2X_RX_MODE_NONE;
12903
12904         if (CNIC_LOADED(bp))
12905                 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12906
12907         /* Stop Tx */
12908         bnx2x_tx_disable(bp);
12909         /* Delete all NAPI objects */
12910         bnx2x_del_all_napi(bp);
12911         if (CNIC_LOADED(bp))
12912                 bnx2x_del_all_napi_cnic(bp);
12913         netdev_reset_tc(bp->dev);
12914
12915         del_timer_sync(&bp->timer);
12916         cancel_delayed_work(&bp->sp_task);
12917         cancel_delayed_work(&bp->period_task);
12918
12919         spin_lock_bh(&bp->stats_lock);
12920         bp->stats_state = STATS_STATE_DISABLED;
12921         spin_unlock_bh(&bp->stats_lock);
12922
12923         bnx2x_save_statistics(bp);
12924
12925         netif_carrier_off(bp->dev);
12926
12927         return 0;
12928 }
12929
12930 /**
12931  * bnx2x_io_error_detected - called when PCI error is detected
12932  * @pdev: Pointer to PCI device
12933  * @state: The current pci connection state
12934  *
12935  * This function is called after a PCI bus error affecting
12936  * this device has been detected.
12937  */
12938 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12939                                                 pci_channel_state_t state)
12940 {
12941         struct net_device *dev = pci_get_drvdata(pdev);
12942         struct bnx2x *bp = netdev_priv(dev);
12943
12944         rtnl_lock();
12945
12946         BNX2X_ERR("IO error detected\n");
12947
12948         netif_device_detach(dev);
12949
12950         if (state == pci_channel_io_perm_failure) {
12951                 rtnl_unlock();
12952                 return PCI_ERS_RESULT_DISCONNECT;
12953         }
12954
12955         if (netif_running(dev))
12956                 bnx2x_eeh_nic_unload(bp);
12957
12958         bnx2x_prev_path_mark_eeh(bp);
12959
12960         pci_disable_device(pdev);
12961
12962         rtnl_unlock();
12963
12964         /* Request a slot reset */
12965         return PCI_ERS_RESULT_NEED_RESET;
12966 }
12967
12968 /**
12969  * bnx2x_io_slot_reset - called after the PCI bus has been reset
12970  * @pdev: Pointer to PCI device
12971  *
12972  * Restart the card from scratch, as if from a cold-boot.
12973  */
12974 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12975 {
12976         struct net_device *dev = pci_get_drvdata(pdev);
12977         struct bnx2x *bp = netdev_priv(dev);
12978         int i;
12979
12980         rtnl_lock();
12981         BNX2X_ERR("IO slot reset initializing...\n");
12982         if (pci_enable_device(pdev)) {
12983                 dev_err(&pdev->dev,
12984                         "Cannot re-enable PCI device after reset\n");
12985                 rtnl_unlock();
12986                 return PCI_ERS_RESULT_DISCONNECT;
12987         }
12988
12989         pci_set_master(pdev);
12990         pci_restore_state(pdev);
12991         pci_save_state(pdev);
12992
12993         if (netif_running(dev))
12994                 bnx2x_set_power_state(bp, PCI_D0);
12995
12996         if (netif_running(dev)) {
12997                 BNX2X_ERR("IO slot reset --> driver unload\n");
12998
12999                 /* MCP should have been reset; Need to wait for validity */
13000                 bnx2x_init_shmem(bp);
13001
13002                 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
13003                         u32 v;
13004
13005                         v = SHMEM2_RD(bp,
13006                                       drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
13007                         SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
13008                                   v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
13009                 }
13010                 bnx2x_drain_tx_queues(bp);
13011                 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
13012                 bnx2x_netif_stop(bp, 1);
13013                 bnx2x_free_irq(bp);
13014
13015                 /* Report UNLOAD_DONE to MCP */
13016                 bnx2x_send_unload_done(bp, true);
13017
13018                 bp->sp_state = 0;
13019                 bp->port.pmf = 0;
13020
13021                 bnx2x_prev_unload(bp);
13022
13023                 /* We should have reset the engine, so it's fair to assume
13024                  * the FW will no longer write to the bnx2x driver.
13025                  */
13026                 bnx2x_squeeze_objects(bp);
13027                 bnx2x_free_skbs(bp);
13028                 for_each_rx_queue(bp, i)
13029                         bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13030                 bnx2x_free_fp_mem(bp);
13031                 bnx2x_free_mem(bp);
13032
13033                 bp->state = BNX2X_STATE_CLOSED;
13034         }
13035
13036         rtnl_unlock();
13037
13038         return PCI_ERS_RESULT_RECOVERED;
13039 }
13040
13041 /**
13042  * bnx2x_io_resume - called when traffic can start flowing again
13043  * @pdev: Pointer to PCI device
13044  *
13045  * This callback is called when the error recovery driver tells us that
13046  * its OK to resume normal operation.
13047  */
13048 static void bnx2x_io_resume(struct pci_dev *pdev)
13049 {
13050         struct net_device *dev = pci_get_drvdata(pdev);
13051         struct bnx2x *bp = netdev_priv(dev);
13052
13053         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13054                 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
13055                 return;
13056         }
13057
13058         rtnl_lock();
13059
13060         bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
13061                                                         DRV_MSG_SEQ_NUMBER_MASK;
13062
13063         if (netif_running(dev))
13064                 bnx2x_nic_load(bp, LOAD_NORMAL);
13065
13066         netif_device_attach(dev);
13067
13068         rtnl_unlock();
13069 }
13070
13071 static const struct pci_error_handlers bnx2x_err_handler = {
13072         .error_detected = bnx2x_io_error_detected,
13073         .slot_reset     = bnx2x_io_slot_reset,
13074         .resume         = bnx2x_io_resume,
13075 };
13076
13077 static void bnx2x_shutdown(struct pci_dev *pdev)
13078 {
13079         struct net_device *dev = pci_get_drvdata(pdev);
13080         struct bnx2x *bp;
13081
13082         if (!dev)
13083                 return;
13084
13085         bp = netdev_priv(dev);
13086         if (!bp)
13087                 return;
13088
13089         rtnl_lock();
13090         netif_device_detach(dev);
13091         rtnl_unlock();
13092
13093         /* Don't remove the netdevice, as there are scenarios which will cause
13094          * the kernel to hang, e.g., when trying to remove bnx2i while the
13095          * rootfs is mounted from SAN.
13096          */
13097         __bnx2x_remove(pdev, dev, bp, false);
13098 }
13099
13100 static struct pci_driver bnx2x_pci_driver = {
13101         .name        = DRV_MODULE_NAME,
13102         .id_table    = bnx2x_pci_tbl,
13103         .probe       = bnx2x_init_one,
13104         .remove      = bnx2x_remove_one,
13105         .suspend     = bnx2x_suspend,
13106         .resume      = bnx2x_resume,
13107         .err_handler = &bnx2x_err_handler,
13108 #ifdef CONFIG_BNX2X_SRIOV
13109         .sriov_configure = bnx2x_sriov_configure,
13110 #endif
13111         .shutdown    = bnx2x_shutdown,
13112 };
13113
13114 static int __init bnx2x_init(void)
13115 {
13116         int ret;
13117
13118         pr_info("%s", version);
13119
13120         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13121         if (bnx2x_wq == NULL) {
13122                 pr_err("Cannot create workqueue\n");
13123                 return -ENOMEM;
13124         }
13125
13126         ret = pci_register_driver(&bnx2x_pci_driver);
13127         if (ret) {
13128                 pr_err("Cannot register driver\n");
13129                 destroy_workqueue(bnx2x_wq);
13130         }
13131         return ret;
13132 }
13133
13134 static void __exit bnx2x_cleanup(void)
13135 {
13136         struct list_head *pos, *q;
13137
13138         pci_unregister_driver(&bnx2x_pci_driver);
13139
13140         destroy_workqueue(bnx2x_wq);
13141
13142         /* Free globally allocated resources */
13143         list_for_each_safe(pos, q, &bnx2x_prev_list) {
13144                 struct bnx2x_prev_path_list *tmp =
13145                         list_entry(pos, struct bnx2x_prev_path_list, list);
13146                 list_del(pos);
13147                 kfree(tmp);
13148         }
13149 }
13150
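/* Assert the per-function GENERAL_ATTN_12 attention bit to signal that the
 * link state has changed.
 */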
13151 void bnx2x_notify_link_changed(struct bnx2x *bp)
13152 {
13153         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
13154 }
13155
13156 module_init(bnx2x_init);
13157 module_exit(bnx2x_cleanup);
13158
13159 /**
13160  * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
13161  *
13162  * @bp:         driver handle
13163  * @set:        set or clear the CAM entry
13164  *
13165  * This function will wait until the ramrod completion returns.
13166  * Return 0 if success, -ENODEV if ramrod doesn't return.
13167  */
13168 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
13169 {
13170         unsigned long ramrod_flags = 0;
13171
13172         __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13173         return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13174                                  &bp->iscsi_l2_mac_obj, true,
13175                                  BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13176 }
13177
13178 /* count denotes the number of new completions we have seen */
13179 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13180 {
13181         struct eth_spe *spe;
13182         int cxt_index, cxt_offset;
13183
13184 #ifdef BNX2X_STOP_ON_ERROR
13185         if (unlikely(bp->panic))
13186                 return;
13187 #endif
13188
13189         spin_lock_bh(&bp->spq_lock);
13190         BUG_ON(bp->cnic_spq_pending < count);
13191         bp->cnic_spq_pending -= count;
13192
13193         for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13194                 u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13195                                 & SPE_HDR_CONN_TYPE) >>
13196                                 SPE_HDR_CONN_TYPE_SHIFT;
13197                 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
13198                                 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
13199
13200                 /* Set validation for iSCSI L2 client before sending SETUP
13201                  *  ramrod
13202                  */
13203                 if (type == ETH_CONNECTION_TYPE) {
13204                         if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
13205                                 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
13206                                         ILT_PAGE_CIDS;
13207                                 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
13208                                         (cxt_index * ILT_PAGE_CIDS);
13209                                 bnx2x_set_ctx_validation(bp,
13210                                         &bp->context[cxt_index].
13211                                                          vcxt[cxt_offset].eth,
13212                                         BNX2X_ISCSI_ETH_CID(bp));
13213                         }
13214                 }
13215
13216                 /*
13217                  * There may be no more than 8 L2 and no more than 8 L5 SPEs
13218                  * in the air. We also check that the number of outstanding
13219                  * COMMON ramrods is not more than the EQ and SPQ can
13220                  * accommodate.
13221                  */
13222                 if (type == ETH_CONNECTION_TYPE) {
13223                         if (!atomic_read(&bp->cq_spq_left))
13224                                 break;
13225                         else
13226                                 atomic_dec(&bp->cq_spq_left);
13227                 } else if (type == NONE_CONNECTION_TYPE) {
13228                         if (!atomic_read(&bp->eq_spq_left))
13229                                 break;
13230                         else
13231                                 atomic_dec(&bp->eq_spq_left);
13232                 } else if ((type == ISCSI_CONNECTION_TYPE) ||
13233                            (type == FCOE_CONNECTION_TYPE)) {
13234                         if (bp->cnic_spq_pending >=
13235                             bp->cnic_eth_dev.max_kwqe_pending)
13236                                 break;
13237                         else
13238                                 bp->cnic_spq_pending++;
13239                 } else {
13240                         BNX2X_ERR("Unknown SPE type: %d\n", type);
13241                         bnx2x_panic();
13242                         break;
13243                 }
13244
13245                 spe = bnx2x_sp_get_next(bp);
13246                 *spe = *bp->cnic_kwq_cons;
13247
13248                 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
13249                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13250
13251                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13252                         bp->cnic_kwq_cons = bp->cnic_kwq;
13253                 else
13254                         bp->cnic_kwq_cons++;
13255         }
13256         bnx2x_sp_prod_update(bp);
13257         spin_unlock_bh(&bp->spq_lock);
13258 }
13259
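/* Stage 'count' kwqes in the cnic_kwq ring; they are pushed to the slow path
 * queue by bnx2x_cnic_sp_post() as SPQ space becomes available.
 */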
13260 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13261                                struct kwqe_16 *kwqes[], u32 count)
13262 {
13263         struct bnx2x *bp = netdev_priv(dev);
13264         int i;
13265
13266 #ifdef BNX2X_STOP_ON_ERROR
13267         if (unlikely(bp->panic)) {
13268                 BNX2X_ERR("Can't post to SP queue while panic\n");
13269                 return -EIO;
13270         }
13271 #endif
13272
13273         if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
13274             (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
13275                 BNX2X_ERR("Handling parity error recovery. Try again later\n");
13276                 return -EAGAIN;
13277         }
13278
13279         spin_lock_bh(&bp->spq_lock);
13280
13281         for (i = 0; i < count; i++) {
13282                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13283
13284                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13285                         break;
13286
13287                 *bp->cnic_kwq_prod = *spe;
13288
13289                 bp->cnic_kwq_pending++;
13290
13291                 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
13292                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
13293                    spe->data.update_data_addr.hi,
13294                    spe->data.update_data_addr.lo,
13295                    bp->cnic_kwq_pending);
13296
13297                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13298                         bp->cnic_kwq_prod = bp->cnic_kwq;
13299                 else
13300                         bp->cnic_kwq_prod++;
13301         }
13302
13303         spin_unlock_bh(&bp->spq_lock);
13304
13305         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13306                 bnx2x_cnic_sp_post(bp, 0);
13307
13308         return i;
13309 }
13310
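/* Deliver a control event to the CNIC driver in process context;
 * serialized against CNIC (un)registration by cnic_mutex.
 */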
13311 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13312 {
13313         struct cnic_ops *c_ops;
13314         int rc = 0;
13315
13316         mutex_lock(&bp->cnic_mutex);
13317         c_ops = rcu_dereference_protected(bp->cnic_ops,
13318                                           lockdep_is_held(&bp->cnic_mutex));
13319         if (c_ops)
13320                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13321         mutex_unlock(&bp->cnic_mutex);
13322
13323         return rc;
13324 }
13325
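/* Same as bnx2x_cnic_ctl_send(), but safe to call from BH context:
 * the cnic_ops pointer is dereferenced under rcu_read_lock() instead
 * of cnic_mutex.
 */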
13326 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13327 {
13328         struct cnic_ops *c_ops;
13329         int rc = 0;
13330
13331         rcu_read_lock();
13332         c_ops = rcu_dereference(bp->cnic_ops);
13333         if (c_ops)
13334                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13335         rcu_read_unlock();
13336
13337         return rc;
13338 }
13339
13340 /*
13341  * For CNIC control commands that carry no data.
13342  */
13343 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13344 {
13345         struct cnic_ctl_info ctl = {0};
13346
13347         ctl.cmd = cmd;
13348
13349         return bnx2x_cnic_ctl_send(bp, &ctl);
13350 }
13351
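/* Report a CFC delete completion for @cid (with error status @err) to
 * CNIC, then kick the slowpath queue so pending KWQEs can be posted.
 */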
13352 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
13353 {
13354         struct cnic_ctl_info ctl = {0};
13355
13356         /* first we tell CNIC and only then we count this as a completion */
13357         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13358         ctl.data.comp.cid = cid;
13359         ctl.data.comp.error = err;
13360
13361         bnx2x_cnic_ctl_send_bh(bp, &ctl);
13362         bnx2x_cnic_sp_post(bp, 0);
13363 }
13364
13365 /* Called with netif_addr_lock_bh() taken.
13366  * Sets the rx_mode configuration for the iSCSI ETH client.
13367  * Does not block.
13368  * Completion must be checked by the caller.
13369  */
13370 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
13371 {
13372         unsigned long accept_flags = 0, ramrod_flags = 0;
13373         u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
13374         int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
13375
13376         if (start) {
13377                 /* Start accepting on the iSCSI L2 ring. Accept all
13378                  * multicasts because that is the only way for the UIO
13379                  * Queue to receive them (in non-promiscuous mode only one
13380                  * Queue per function, the leading one in our case, will
13381                  * receive multicast packets).
13382                  */
13383                 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
13384                 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
13385                 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
13386                 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
13387
13388                 /* Clear STOP_PENDING bit if START is requested */
13389                 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
13390
13391                 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
13392         } else
13393                 /* Clear START_PENDING bit if STOP is requested */
13394                 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
13395
13396         if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
13397                 set_bit(sched_state, &bp->sp_state);
13398         else {
13399                 __set_bit(RAMROD_RX, &ramrod_flags);
13400                 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
13401                                     ramrod_flags);
13402         }
13403 }
13404
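/* Handle control requests issued by the CNIC driver: context table
 * writes, SPQ credit returns, iSCSI L2 client start/stop and ULP
 * (un)registration bookkeeping in shmem2.
 */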
13405 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13406 {
13407         struct bnx2x *bp = netdev_priv(dev);
13408         int rc = 0;
13409
13410         switch (ctl->cmd) {
13411         case DRV_CTL_CTXTBL_WR_CMD: {
13412                 u32 index = ctl->data.io.offset;
13413                 dma_addr_t addr = ctl->data.io.dma_addr;
13414
13415                 bnx2x_ilt_wr(bp, index, addr);
13416                 break;
13417         }
13418
13419         case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
13420                 int count = ctl->data.credit.credit_count;
13421
13422                 bnx2x_cnic_sp_post(bp, count);
13423                 break;
13424         }
13425
13426         /* rtnl_lock is held.  */
13427         case DRV_CTL_START_L2_CMD: {
13428                 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13429                 unsigned long sp_bits = 0;
13430
13431                 /* Configure the iSCSI classification object */
13432                 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
13433                                    cp->iscsi_l2_client_id,
13434                                    cp->iscsi_l2_cid, BP_FUNC(bp),
13435                                    bnx2x_sp(bp, mac_rdata),
13436                                    bnx2x_sp_mapping(bp, mac_rdata),
13437                                    BNX2X_FILTER_MAC_PENDING,
13438                                    &bp->sp_state, BNX2X_OBJ_TYPE_RX,
13439                                    &bp->macs_pool);
13440
13441                 /* Set iSCSI MAC address */
13442                 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
13443                 if (rc)
13444                         break;
13445
13446                 mmiowb();
13447                 barrier();
13448
13449                 /* Start accepting on iSCSI L2 ring */
13450
13451                 netif_addr_lock_bh(dev);
13452                 bnx2x_set_iscsi_eth_rx_mode(bp, true);
13453                 netif_addr_unlock_bh(dev);
13454
13455                 /* bits to wait on */
13456                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
13457                 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
13458
13459                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
13460                         BNX2X_ERR("rx_mode completion timed out!\n");
13461
13462                 break;
13463         }
13464
13465         /* rtnl_lock is held.  */
13466         case DRV_CTL_STOP_L2_CMD: {
13467                 unsigned long sp_bits = 0;
13468
13469                 /* Stop accepting on iSCSI L2 ring */
13470                 netif_addr_lock_bh(dev);
13471                 bnx2x_set_iscsi_eth_rx_mode(bp, false);
13472                 netif_addr_unlock_bh(dev);
13473
13474                 /* bits to wait on */
13475                 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
13476                 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
13477
13478                 if (!bnx2x_wait_sp_comp(bp, sp_bits))
13479                         BNX2X_ERR("rx_mode completion timed out!\n");
13480
13481                 mmiowb();
13482                 barrier();
13483
13484                 /* Unset iSCSI L2 MAC */
13485                 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
13486                                         BNX2X_ISCSI_ETH_MAC, true);
13487                 break;
13488         }
13489         case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
13490                 int count = ctl->data.credit.credit_count;
13491
13492                 smp_mb__before_atomic_inc();
13493                 atomic_add(count, &bp->cq_spq_left);
13494                 smp_mb__after_atomic_inc();
13495                 break;
13496         }
13497         case DRV_CTL_ULP_REGISTER_CMD: {
13498                 int ulp_type = ctl->data.register_data.ulp_type;
13499
13500                 if (CHIP_IS_E3(bp)) {
13501                         int idx = BP_FW_MB_IDX(bp);
13502                         u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
13503                         int path = BP_PATH(bp);
13504                         int port = BP_PORT(bp);
13505                         int i;
13506                         u32 scratch_offset;
13507                         u32 *host_addr;
13508
13509                         /* first write capability to shmem2 */
13510                         if (ulp_type == CNIC_ULP_ISCSI)
13511                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
13512                         else if (ulp_type == CNIC_ULP_FCOE)
13513                                 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13514                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
13515
13516                         if ((ulp_type != CNIC_ULP_FCOE) ||
13517                             (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
13518                             (!(bp->flags &  BC_SUPPORTS_FCOE_FEATURES)))
13519                                 break;
13520
13521                         /* If we got here, write the FCoE capabilities */
13522                         scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
13523                         if (!scratch_offset)
13524                                 break;
13525                         scratch_offset += offsetof(struct glob_ncsi_oem_data,
13526                                                    fcoe_features[path][port]);
13527                         host_addr = (u32 *) &(ctl->data.register_data.
13528                                               fcoe_features);
13529                         for (i = 0; i < sizeof(struct fcoe_capabilities);
13530                              i += 4)
13531                                 REG_WR(bp, scratch_offset + i,
13532                                        *(host_addr + i/4));
13533                 }
13534                 break;
13535         }
13536
13537         case DRV_CTL_ULP_UNREGISTER_CMD: {
13538                 int ulp_type = ctl->data.ulp_type;
13539
13540                 if (CHIP_IS_E3(bp)) {
13541                         int idx = BP_FW_MB_IDX(bp);
13542                         u32 cap;
13543
13544                         cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
13545                         if (ulp_type == CNIC_ULP_ISCSI)
13546                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
13547                         else if (ulp_type == CNIC_ULP_FCOE)
13548                                 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13549                         SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
13550                 }
13551                 break;
13552         }
13553
13554         default:
13555                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13556                 rc = -EINVAL;
13557         }
13558
13559         return rc;
13560 }
13561
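/* Fill the CNIC IRQ array: entry 0 carries the CNIC status block (and
 * MSI-X vector 1 when MSI-X is in use), entry 1 describes the default
 * status block.
 */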
13562 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13563 {
13564         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13565
13566         if (bp->flags & USING_MSIX_FLAG) {
13567                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13568                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13569                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13570         } else {
13571                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13572                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13573         }
13574         if (!CHIP_IS_E1x(bp))
13575                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
13576         else
13577                 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
13578
13579         cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
13580         cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
13581         cp->irq_arr[1].status_blk = bp->def_status_blk;
13582         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13583         cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
13584
13585         cp->num_irq = 2;
13586 }
13587
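/* Refresh the CID-layout dependent fields published to CNIC: context
 * table offset, starting CID and the FCoE/iSCSI L2 CIDs.
 */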
13588 void bnx2x_setup_cnic_info(struct bnx2x *bp)
13589 {
13590         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13591
13592         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13593                              bnx2x_cid_ilt_lines(bp);
13594         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
13595         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13596         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13597
13598         if (NO_ISCSI_OOO(bp))
13599                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13600 }
13601
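/* Called by CNIC to attach to this device: validates @ops, loads the
 * CNIC-related driver resources if needed, allocates the one-page kwq
 * ring, fills the IRQ info and finally publishes @ops via RCU.
 */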
13602 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13603                                void *data)
13604 {
13605         struct bnx2x *bp = netdev_priv(dev);
13606         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13607         int rc;
13608
13609         DP(NETIF_MSG_IFUP, "Register_cnic called\n");
13610
13611         if (ops == NULL) {
13612                 BNX2X_ERR("NULL ops received\n");
13613                 return -EINVAL;
13614         }
13615
13616         if (!CNIC_SUPPORT(bp)) {
13617                 BNX2X_ERR("Can't register CNIC when not supported\n");
13618                 return -EOPNOTSUPP;
13619         }
13620
13621         if (!CNIC_LOADED(bp)) {
13622                 rc = bnx2x_load_cnic(bp);
13623                 if (rc) {
13624                         BNX2X_ERR("CNIC-related load failed\n");
13625                         return rc;
13626                 }
13627         }
13628
13629         bp->cnic_enabled = true;
13630
13631         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13632         if (!bp->cnic_kwq)
13633                 return -ENOMEM;
13634
13635         bp->cnic_kwq_cons = bp->cnic_kwq;
13636         bp->cnic_kwq_prod = bp->cnic_kwq;
13637         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13638
13639         bp->cnic_spq_pending = 0;
13640         bp->cnic_kwq_pending = 0;
13641
13642         bp->cnic_data = data;
13643
13644         cp->num_irq = 0;
13645         cp->drv_state |= CNIC_DRV_STATE_REGD;
13646         cp->iro_arr = bp->iro_arr;
13647
13648         bnx2x_setup_cnic_irq_info(bp);
13649
13650         rcu_assign_pointer(bp->cnic_ops, ops);
13651
13652         return 0;
13653 }
13654
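/* Detach CNIC from this device: clear the driver state, unpublish
 * cnic_ops under cnic_mutex, wait for RCU readers and free the kwq ring.
 */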
13655 static int bnx2x_unregister_cnic(struct net_device *dev)
13656 {
13657         struct bnx2x *bp = netdev_priv(dev);
13658         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13659
13660         mutex_lock(&bp->cnic_mutex);
13661         cp->drv_state = 0;
13662         RCU_INIT_POINTER(bp->cnic_ops, NULL);
13663         mutex_unlock(&bp->cnic_mutex);
13664         synchronize_rcu();
13665         bp->cnic_enabled = false;
13666         kfree(bp->cnic_kwq);
13667         bp->cnic_kwq = NULL;
13668
13669         return 0;
13670 }
13671
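/* Fill and return the cnic_eth_dev descriptor (register bases, CID layout
 * and driver callbacks) that CNIC uses to work with this device.
 */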
13672 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13673 {
13674         struct bnx2x *bp = netdev_priv(dev);
13675         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13676
13677         /* If both iSCSI and FCoE are disabled, return NULL to
13678          * indicate to CNIC that it should not try to work with
13679          * this device.
13680          */
13681         if (NO_ISCSI(bp) && NO_FCOE(bp))
13682                 return NULL;
13683
13684         cp->drv_owner = THIS_MODULE;
13685         cp->chip_id = CHIP_ID(bp);
13686         cp->pdev = bp->pdev;
13687         cp->io_base = bp->regview;
13688         cp->io_base2 = bp->doorbells;
13689         cp->max_kwqe_pending = 8;
13690         cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
13691         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13692                              bnx2x_cid_ilt_lines(bp);
13693         cp->ctx_tbl_len = CNIC_ILT_LINES;
13694         cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
13695         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13696         cp->drv_ctl = bnx2x_drv_ctl;
13697         cp->drv_register_cnic = bnx2x_register_cnic;
13698         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13699         cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13700         cp->iscsi_l2_client_id =
13701                 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
13702         cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13703
13704         if (NO_ISCSI_OOO(bp))
13705                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13706
13707         if (NO_ISCSI(bp))
13708                 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
13709
13710         if (NO_FCOE(bp))
13711                 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
13712
13713         BNX2X_DEV_INFO(
13714                 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
13715                 cp->ctx_blk_size,
13716                 cp->ctx_tbl_offset,
13717                 cp->ctx_tbl_len,
13718                 cp->starting_cid);
13719         return cp;
13720 }
13721
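/* Return the offset at which the Rx producers of @fp are updated: VFs get
 * it via bnx2x_vf_ustorm_prods_offset(), PFs use the per-queue-zone (E2+)
 * or per-port (E1x) USTORM offsets within BAR_USTRORM_INTMEM.
 */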
13722 u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
13723 {
13724         struct bnx2x *bp = fp->bp;
13725         u32 offset = BAR_USTRORM_INTMEM;
13726
13727         if (IS_VF(bp))
13728                 return bnx2x_vf_ustorm_prods_offset(bp, fp);
13729         else if (!CHIP_IS_E1x(bp))
13730                 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
13731         else
13732                 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
13733
13734         return offset;
13735 }
13736
13737 /* Called only on E1H or E2.
13738  * When pretending to be a PF, the pretend value is the function number 0..7.
13739  * When pretending to be a VF, the pretend value is the PF-num:VF-valid:ABS-VFID
13740  * combination.
13741  */
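/* Illustrative sketch only (the VF-handle helper named below is an
 * assumption, not defined in this file): callers typically pretend to the
 * target function, access the per-function registers, then restore their
 * own function number:
 *
 *	bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, abs_vfid));
 *	... access registers on behalf of the VF ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 */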
13742 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
13743 {
13744         u32 pretend_reg;
13745
13746         if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
13747                 return -1;
13748
13749         /* get my own pretend register */
13750         pretend_reg = bnx2x_get_pretend_reg(bp);
13751         REG_WR(bp, pretend_reg, pretend_func_val);
13752         REG_RD(bp, pretend_reg);
13753         return 0;
13754 }