/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

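/* Free the DMA-coherent memory backing a queue's descriptor ring, if any */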
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

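/* Enable or disable host interrupt generation by toggling the HOSTINTR
 * bit of the MEMBAR interrupt-control register via PCI config space.
 */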
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

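/* Notify HW of newly posted RX buffers by ringing the RQ doorbell. The
 * wmb() makes the descriptor writes visible to the device before the
 * doorbell write.
 */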
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

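/* Ring the EQ doorbell: optionally re-arm the EQ and/or clear the
 * interrupt, and acknowledge @num_popped processed event entries.
 */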
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

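/* ndo_set_mac_address handler. Where the MAC filter is driver-managed,
 * the new pmac entry is added before the old one is deleted, so the
 * interface is never left without a valid MAC programmed.
 */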
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user passes the same MAC that was
         * configured for the VF by the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

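/* Copy the port, rxf and pmem counters from a v0 GET_STATS response
 * into the driver's unified be_drv_stats format.
 */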
static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        drvs->eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

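/* Fold a free-running 16-bit HW counter into a 32-bit accumulator,
 * accounting for at most one wrap since the last reading. E.g. with
 * *acc == 0x0001FFFE and val == 2 the counter has wrapped, so the
 * accumulated value becomes 0x00020002.
 */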
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* this erx HW counter wraps around after 65535;
                         * the driver accumulates it into a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments
                                             [rxo->q.id]);
                }
        }
}

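/* ndo_get_stats64 handler: sums the per-queue SW counters (using the
 * u64_stats fetch/retry loop for a consistent 64-bit snapshot) and
 * folds in the HW error counters from the last stats query.
 */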
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per-interface; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

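/* Fill a TX WRB with the DMA address and length of one buffer fragment */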
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

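/* Return the VLAN tag to transmit; if the priority requested by the
 * stack is not in the available-priority bitmap, substitute the
 * FW-recommended priority.
 */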
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

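/* Build the header WRB that precedes the fragment WRBs: sets the
 * LSO/checksum-offload flags, VLAN tag, and the total WRB count and
 * byte length for this packet.
 */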
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

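/* DMA-map the skb and post one WRB per fragment, plus the header WRB
 * and an optional dummy WRB. On a mapping error, unmap whatever was
 * mapped, rewind the queue head and return 0.
 */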
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

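/* Insert the VLAN tag into the packet payload itself and clear the
 * out-of-band vlan_tci; used by be_xmit() to work around a HW csum bug
 * with out-of-band VLAN tags.
 */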
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

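/* ndo_vlan_rx_add_vid handler: record the VID and reprogram the HW VLAN
 * filter table (be_vid_config() falls back to VLAN promiscuous mode if
 * the HW filters are exhausted).
 */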
static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

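/* ndo_set_vf_mac handler: program @mac as the given VF's MAC address,
 * replacing any currently active entry.
 */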
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

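/* Count this PF's VFs by walking the PCI device list; returns either
 * all VFs found or only those assigned to a guest, depending on
 * @vf_state.
 */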
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

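/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the measured RX packet rate and program it if it has changed.
 */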
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) /
                        (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts
         */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

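/* Return the page_info for the RX frag at @frag_idx, unmapping the
 * backing page once its last user is done with it.
 */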
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);

        netif_receive_skb(skb);
}

1392 /* Process the RX completion indicated by rxcp when GRO is enabled */
1393 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1394                              struct be_rx_compl_info *rxcp)
1395 {
1396         struct be_adapter *adapter = rxo->adapter;
1397         struct be_rx_page_info *page_info;
1398         struct sk_buff *skb = NULL;
1399         struct be_queue_info *rxq = &rxo->q;
1400         u16 remaining, curr_frag_len;
1401         u16 i, j;
1402
1403         skb = napi_get_frags(napi);
1404         if (!skb) {
1405                 be_rx_compl_discard(rxo, rxcp);
1406                 return;
1407         }
1408
1409         remaining = rxcp->pkt_size;
1410         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1411                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1412
1413                 curr_frag_len = min(remaining, rx_frag_size);
1414
1415                 /* Coalesce all frags from the same physical page in one slot */
1416                 if (i == 0 || page_info->page_offset == 0) {
1417                         /* First frag or Fresh page */
1418                         j++;
1419                         skb_frag_set_page(skb, j, page_info->page);
1420                         skb_shinfo(skb)->frags[j].page_offset =
1421                                                         page_info->page_offset;
1422                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1423                 } else {
1424                         put_page(page_info->page);
1425                 }
1426                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1427                 skb->truesize += rx_frag_size;
1428                 remaining -= curr_frag_len;
1429                 index_inc(&rxcp->rxq_idx, rxq->len);
1430                 memset(page_info, 0, sizeof(*page_info));
1431         }
1432         BUG_ON(j > MAX_SKB_FRAGS);
1433
1434         skb_shinfo(skb)->nr_frags = j + 1;
1435         skb->len = rxcp->pkt_size;
1436         skb->data_len = rxcp->pkt_size;
1437         skb->ip_summed = CHECKSUM_UNNECESSARY;
1438         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1439         if (adapter->netdev->features & NETIF_F_RXHASH)
1440                 skb->rxhash = rxcp->rss_hash;
1441
1442         if (rxcp->vlanf)
1443                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1444
1445         napi_gro_frags(napi);
1446 }
1447
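/* Parse a v1 (BE3 native mode) RX completion into the chip-independent
 * rxcp structure used by the rest of the RX path; be_parse_rx_compl_v0()
 * below is its legacy-format counterpart.
 */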
1448 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1449                                  struct be_rx_compl_info *rxcp)
1450 {
1451         rxcp->pkt_size =
1452                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1453         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1454         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1455         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1456         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1457         rxcp->ip_csum =
1458                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1459         rxcp->l4_csum =
1460                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1461         rxcp->ipv6 =
1462                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1463         rxcp->rxq_idx =
1464                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1465         rxcp->num_rcvd =
1466                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1467         rxcp->pkt_type =
1468                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1469         rxcp->rss_hash =
1470                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1471         if (rxcp->vlanf) {
1472                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1473                                           compl);
1474                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1475                                                compl);
1476         }
1477         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1478 }
1479
1480 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1481                                  struct be_rx_compl_info *rxcp)
1482 {
1483         rxcp->pkt_size =
1484                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1485         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1486         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1487         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1488         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1489         rxcp->ip_csum =
1490                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1491         rxcp->l4_csum =
1492                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1493         rxcp->ipv6 =
1494                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1495         rxcp->rxq_idx =
1496                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1497         rxcp->num_rcvd =
1498                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1499         rxcp->pkt_type =
1500                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1501         rxcp->rss_hash =
1502                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1503         if (rxcp->vlanf) {
1504                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1505                                           compl);
1506                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1507                                                compl);
1508         }
1509         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1510 }
1511
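/* Fetch the next valid RX completion from the RX CQ, or NULL if the CQ is
 * empty. The entry is endian-converted, parsed into rxo->rxcp (v1 format in
 * BE3 native mode, v0 otherwise) and its valid bit is cleared so it is not
 * processed twice. Callers drain the CQ in a loop, e.g.:
 *
 *      while ((rxcp = be_rx_compl_get(rxo)) != NULL)
 *              ...process or discard rxcp...
 */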
1512 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1513 {
1514         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1515         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1516         struct be_adapter *adapter = rxo->adapter;
1517
1518         /* For checking the valid bit it is OK to use either definition, as
1519          * the valid bit is at the same position in both v0 and v1 Rx compls */
1520         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1521                 return NULL;
1522
1523         rmb();
1524         be_dws_le_to_cpu(compl, sizeof(*compl));
1525
1526         if (adapter->be3_native)
1527                 be_parse_rx_compl_v1(compl, rxcp);
1528         else
1529                 be_parse_rx_compl_v0(compl, rxcp);
1530
1531         if (rxcp->vlanf) {
1532                 /* vlanf could be wrongly set in some cards.
1533                  * Ignore it if vtm is not set */
1534                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1535                         rxcp->vlanf = 0;
1536
1537                 if (!lancer_chip(adapter))
1538                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1539
1540                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1541                     !adapter->vlan_tag[rxcp->vlan_tag])
1542                         rxcp->vlanf = 0;
1543         }
1544
1545         /* As the compl has been parsed, reset it; we won't touch it again */
1546         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1547
1548         queue_tail_inc(&rxo->cq);
1549         return rxcp;
1550 }
1551
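/* Allocate pages for RX frags. For order > 0 a compound page is requested
 * so that get_page()/put_page() on the head page correctly refcount all the
 * frags carved out of it.
 */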
1552 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1553 {
1554         u32 order = get_order(size);
1555
1556         if (order > 0)
1557                 gfp |= __GFP_COMP;
1558         return  alloc_pages(gfp, order);
1559 }
1560
1561 /*
1562  * Allocate a page, split it into fragments of size rx_frag_size and post
1563  * them as receive buffers to BE
1564  */
1565 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1566 {
1567         struct be_adapter *adapter = rxo->adapter;
1568         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1569         struct be_queue_info *rxq = &rxo->q;
1570         struct page *pagep = NULL;
1571         struct be_eth_rx_d *rxd;
1572         u64 page_dmaaddr = 0, frag_dmaaddr;
1573         u32 posted, page_offset = 0;
1574
1575         page_info = &rxo->page_info_tbl[rxq->head];
1576         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1577                 if (!pagep) {
1578                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1579                         if (unlikely(!pagep)) {
1580                                 rx_stats(rxo)->rx_post_fail++;
1581                                 break;
1582                         }
1583                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1584                                                     0, adapter->big_page_size,
1585                                                     DMA_FROM_DEVICE);
1586                         page_info->page_offset = 0;
1587                 } else {
1588                         get_page(pagep);
1589                         page_info->page_offset = page_offset + rx_frag_size;
1590                 }
1591                 page_offset = page_info->page_offset;
1592                 page_info->page = pagep;
1593                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1594                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1595
1596                 rxd = queue_head_node(rxq);
1597                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1598                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1599
1600                 /* Any space left in the current big page for another frag? */
1601                 if ((page_offset + rx_frag_size + rx_frag_size) >
1602                                         adapter->big_page_size) {
1603                         pagep = NULL;
1604                         page_info->last_page_user = true;
1605                 }
1606
1607                 prev_page_info = page_info;
1608                 queue_head_inc(rxq);
1609                 page_info = &rxo->page_info_tbl[rxq->head];
1610         }
1611         if (pagep)
1612                 prev_page_info->last_page_user = true;
1613
1614         if (posted) {
1615                 atomic_add(posted, &rxq->used);
1616                 be_rxq_notify(adapter, rxq->id, posted);
1617         } else if (atomic_read(&rxq->used) == 0) {
1618                 /* Let be_worker replenish when memory is available */
1619                 rxo->rx_post_starved = true;
1620         }
1621 }
1622
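/* Fetch the next valid TX completion from the TX CQ, or NULL if none.
 * Mirrors be_rx_compl_get(): endian-convert the entry and clear its valid
 * bit so it is not processed twice.
 */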
1623 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1624 {
1625         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1626
1627         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1628                 return NULL;
1629
1630         rmb();
1631         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1632
1633         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1634
1635         queue_tail_inc(tx_cq);
1636         return txcp;
1637 }
1638
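/* Reclaim the wrbs of one transmitted skb: starting at the header wrb at
 * the queue tail, unmap every frag up to last_index, free the skb and
 * return the number of wrbs (including the header wrb) that were consumed.
 */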
1639 static u16 be_tx_compl_process(struct be_adapter *adapter,
1640                 struct be_tx_obj *txo, u16 last_index)
1641 {
1642         struct be_queue_info *txq = &txo->q;
1643         struct be_eth_wrb *wrb;
1644         struct sk_buff **sent_skbs = txo->sent_skb_list;
1645         struct sk_buff *sent_skb;
1646         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1647         bool unmap_skb_hdr = true;
1648
1649         sent_skb = sent_skbs[txq->tail];
1650         BUG_ON(!sent_skb);
1651         sent_skbs[txq->tail] = NULL;
1652
1653         /* skip header wrb */
1654         queue_tail_inc(txq);
1655
1656         do {
1657                 cur_index = txq->tail;
1658                 wrb = queue_tail_node(txq);
1659                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1660                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1661                 unmap_skb_hdr = false;
1662
1663                 num_wrbs++;
1664                 queue_tail_inc(txq);
1665         } while (cur_index != last_index);
1666
1667         kfree_skb(sent_skb);
1668         return num_wrbs;
1669 }
1670
1671 /* Count and consume the pending events in the event queue */
1672 static inline int events_get(struct be_eq_obj *eqo)
1673 {
1674         struct be_eq_entry *eqe;
1675         int num = 0;
1676
1677         do {
1678                 eqe = queue_tail_node(&eqo->q);
1679                 if (eqe->evt == 0)
1680                         break;
1681
1682                 rmb();
1683                 eqe->evt = 0;
1684                 num++;
1685                 queue_tail_inc(&eqo->q);
1686         } while (true);
1687
1688         return num;
1689 }
1690
1691 /* Leaves the EQ in a disarmed state */
1692 static void be_eq_clean(struct be_eq_obj *eqo)
1693 {
1694         int num = events_get(eqo);
1695
1696         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1697 }
1698
1699 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1700 {
1701         struct be_rx_page_info *page_info;
1702         struct be_queue_info *rxq = &rxo->q;
1703         struct be_queue_info *rx_cq = &rxo->cq;
1704         struct be_rx_compl_info *rxcp;
1705         struct be_adapter *adapter = rxo->adapter;
1706         int flush_wait = 0;
1707         u16 tail;
1708
1709         /* Consume pending rx completions.
1710          * Wait for the flush completion (identified by zero num_rcvd)
1711          * to arrive. Notify CQ even when there are no more CQ entries
1712          * for HW to flush partially coalesced CQ entries.
1713          * In Lancer, there is no need to wait for flush compl.
1714          */
1715         for (;;) {
1716                 rxcp = be_rx_compl_get(rxo);
1717                 if (rxcp == NULL) {
1718                         if (lancer_chip(adapter))
1719                                 break;
1720
1721                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1722                                 dev_warn(&adapter->pdev->dev,
1723                                          "did not receive flush compl\n");
1724                                 break;
1725                         }
1726                         be_cq_notify(adapter, rx_cq->id, true, 0);
1727                         mdelay(1);
1728                 } else {
1729                         be_rx_compl_discard(rxo, rxcp);
1730                         be_cq_notify(adapter, rx_cq->id, true, 1);
1731                         if (rxcp->num_rcvd == 0)
1732                                 break;
1733                 }
1734         }
1735
1736         /* After cleanup, leave the CQ in unarmed state */
1737         be_cq_notify(adapter, rx_cq->id, false, 0);
1738
1739         /* Then free posted rx buffers that were not used */
1740         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1741         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1742                 page_info = get_rx_page_info(rxo, tail);
1743                 put_page(page_info->page);
1744                 memset(page_info, 0, sizeof(*page_info));
1745         }
1746         BUG_ON(atomic_read(&rxq->used));
1747         rxq->tail = rxq->head = 0;
1748 }
1749
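/* Drain pending TX completions at tear-down time. Completions are polled
 * for up to 200ms; wrbs still outstanding after that (their compls will
 * never arrive) are reclaimed and their skbs freed directly.
 */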
1750 static void be_tx_compl_clean(struct be_adapter *adapter)
1751 {
1752         struct be_tx_obj *txo;
1753         struct be_queue_info *txq;
1754         struct be_eth_tx_compl *txcp;
1755         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1756         struct sk_buff *sent_skb;
1757         bool dummy_wrb;
1758         int i, pending_txqs;
1759
1760         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1761         do {
1762                 pending_txqs = adapter->num_tx_qs;
1763
1764                 for_all_tx_queues(adapter, txo, i) {
1765                         txq = &txo->q;
1766                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1767                                 end_idx =
1768                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1769                                                       wrb_index, txcp);
1770                                 num_wrbs += be_tx_compl_process(adapter, txo,
1771                                                                 end_idx);
1772                                 cmpl++;
1773                         }
1774                         if (cmpl) {
1775                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1776                                 atomic_sub(num_wrbs, &txq->used);
1777                                 cmpl = 0;
1778                                 num_wrbs = 0;
1779                         }
1780                         if (atomic_read(&txq->used) == 0)
1781                                 pending_txqs--;
1782                 }
1783
1784                 if (pending_txqs == 0 || ++timeo > 200)
1785                         break;
1786
1787                 mdelay(1);
1788         } while (true);
1789
1790         for_all_tx_queues(adapter, txo, i) {
1791                 txq = &txo->q;
1792                 if (atomic_read(&txq->used))
1793                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1794                                 atomic_read(&txq->used));
1795
1796                 /* free posted tx for which compls will never arrive */
1797                 while (atomic_read(&txq->used)) {
1798                         sent_skb = txo->sent_skb_list[txq->tail];
1799                         end_idx = txq->tail;
1800                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1801                                                    &dummy_wrb);
1802                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1803                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1804                         atomic_sub(num_wrbs, &txq->used);
1805                 }
1806         }
1807 }
1808
1809 static void be_evt_queues_destroy(struct be_adapter *adapter)
1810 {
1811         struct be_eq_obj *eqo;
1812         int i;
1813
1814         for_all_evt_queues(adapter, eqo, i) {
1815                 if (eqo->q.created) {
1816                         be_eq_clean(eqo);
1817                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1818                 }
1819                 be_queue_free(adapter, &eqo->q);
1820         }
1821 }
1822
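/* Create one event queue per interrupt vector. Every EQ gets the default
 * TX budget and has adaptive interrupt coalescing (AIC) enabled.
 */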
1823 static int be_evt_queues_create(struct be_adapter *adapter)
1824 {
1825         struct be_queue_info *eq;
1826         struct be_eq_obj *eqo;
1827         int i, rc;
1828
1829         adapter->num_evt_qs = num_irqs(adapter);
1830
1831         for_all_evt_queues(adapter, eqo, i) {
1832                 eqo->adapter = adapter;
1833                 eqo->tx_budget = BE_TX_BUDGET;
1834                 eqo->idx = i;
1835                 eqo->max_eqd = BE_MAX_EQD;
1836                 eqo->enable_aic = true;
1837
1838                 eq = &eqo->q;
1839                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1840                                         sizeof(struct be_eq_entry));
1841                 if (rc)
1842                         return rc;
1843
1844                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1845                 if (rc)
1846                         return rc;
1847         }
1848         return 0;
1849 }
1850
1851 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1852 {
1853         struct be_queue_info *q;
1854
1855         q = &adapter->mcc_obj.q;
1856         if (q->created)
1857                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1858         be_queue_free(adapter, q);
1859
1860         q = &adapter->mcc_obj.cq;
1861         if (q->created)
1862                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1863         be_queue_free(adapter, q);
1864 }
1865
1866 /* Must be called only after TX qs are created as MCC shares TX EQ */
1867 static int be_mcc_queues_create(struct be_adapter *adapter)
1868 {
1869         struct be_queue_info *q, *cq;
1870
1871         cq = &adapter->mcc_obj.cq;
1872         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1873                         sizeof(struct be_mcc_compl)))
1874                 goto err;
1875
1876         /* Use the default EQ for MCC completions */
1877         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1878                 goto mcc_cq_free;
1879
1880         q = &adapter->mcc_obj.q;
1881         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1882                 goto mcc_cq_destroy;
1883
1884         if (be_cmd_mccq_create(adapter, q, cq))
1885                 goto mcc_q_free;
1886
1887         return 0;
1888
1889 mcc_q_free:
1890         be_queue_free(adapter, q);
1891 mcc_cq_destroy:
1892         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1893 mcc_cq_free:
1894         be_queue_free(adapter, cq);
1895 err:
1896         return -1;
1897 }
1898
1899 static void be_tx_queues_destroy(struct be_adapter *adapter)
1900 {
1901         struct be_queue_info *q;
1902         struct be_tx_obj *txo;
1903         u8 i;
1904
1905         for_all_tx_queues(adapter, txo, i) {
1906                 q = &txo->q;
1907                 if (q->created)
1908                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1909                 be_queue_free(adapter, q);
1910
1911                 q = &txo->cq;
1912                 if (q->created)
1913                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1914                 be_queue_free(adapter, q);
1915         }
1916 }
1917
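/* A single TX queue is used when SR-IOV is wanted on BE2/BE3, in
 * multi-channel mode, on BE2/BE3 VFs and on BE2 chips; otherwise all the
 * TX queues the function supports are used.
 */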
1918 static int be_num_txqs_want(struct be_adapter *adapter)
1919 {
1920         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1921             be_is_mc(adapter) ||
1922             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1923             BE2_chip(adapter))
1924                 return 1;
1925         else
1926                 return adapter->max_tx_queues;
1927 }
1928
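/* Create a completion queue for each TX queue. When there are fewer event
 * queues than TX queues, the TX CQs are distributed round-robin over the
 * available EQs.
 */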
1929 static int be_tx_cqs_create(struct be_adapter *adapter)
1930 {
1931         struct be_queue_info *cq, *eq;
1932         int status;
1933         struct be_tx_obj *txo;
1934         u8 i;
1935
1936         adapter->num_tx_qs = be_num_txqs_want(adapter);
1937         if (adapter->num_tx_qs != MAX_TX_QS) {
1938                 rtnl_lock();
1939                 netif_set_real_num_tx_queues(adapter->netdev,
1940                         adapter->num_tx_qs);
1941                 rtnl_unlock();
1942         }
1943
1944         for_all_tx_queues(adapter, txo, i) {
1945                 cq = &txo->cq;
1946                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1947                                         sizeof(struct be_eth_tx_compl));
1948                 if (status)
1949                         return status;
1950
1951                 /* If num_evt_qs is less than num_tx_qs, then more than
1952                  * one txq shares an eq
1953                  */
1954                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1955                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1956                 if (status)
1957                         return status;
1958         }
1959         return 0;
1960 }
1961
1962 static int be_tx_qs_create(struct be_adapter *adapter)
1963 {
1964         struct be_tx_obj *txo;
1965         int i, status;
1966
1967         for_all_tx_queues(adapter, txo, i) {
1968                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1969                                         sizeof(struct be_eth_wrb));
1970                 if (status)
1971                         return status;
1972
1973                 status = be_cmd_txq_create(adapter, txo);
1974                 if (status)
1975                         return status;
1976         }
1977
1978         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1979                  adapter->num_tx_qs);
1980         return 0;
1981 }
1982
1983 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1984 {
1985         struct be_queue_info *q;
1986         struct be_rx_obj *rxo;
1987         int i;
1988
1989         for_all_rx_queues(adapter, rxo, i) {
1990                 q = &rxo->cq;
1991                 if (q->created)
1992                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1993                 be_queue_free(adapter, q);
1994         }
1995 }
1996
1997 static int be_rx_cqs_create(struct be_adapter *adapter)
1998 {
1999         struct be_queue_info *eq, *cq;
2000         struct be_rx_obj *rxo;
2001         int rc, i;
2002
2003         /* We'll create as many RSS rings as there are irqs.
2004          * But when there's only one irq there's no use creating RSS rings
2005          */
2006         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2007                                 num_irqs(adapter) + 1 : 1;
2008         if (adapter->num_rx_qs != MAX_RX_QS) {
2009                 rtnl_lock();
2010                 netif_set_real_num_rx_queues(adapter->netdev,
2011                                              adapter->num_rx_qs);
2012                 rtnl_unlock();
2013         }
2014
2015         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2016         for_all_rx_queues(adapter, rxo, i) {
2017                 rxo->adapter = adapter;
2018                 cq = &rxo->cq;
2019                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2020                                 sizeof(struct be_eth_rx_compl));
2021                 if (rc)
2022                         return rc;
2023
2024                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2025                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2026                 if (rc)
2027                         return rc;
2028         }
2029
2030         dev_info(&adapter->pdev->dev,
2031                  "created %d RSS queue(s) and 1 default RX queue\n",
2032                  adapter->num_rx_qs - 1);
2033         return 0;
2034 }
2035
2036 static irqreturn_t be_intx(int irq, void *dev)
2037 {
2038         struct be_eq_obj *eqo = dev;
2039         struct be_adapter *adapter = eqo->adapter;
2040         int num_evts = 0;
2041
2042         /* IRQ is not expected when NAPI is scheduled as the EQ
2043          * will not be armed.
2044          * But, this can happen on Lancer INTx where it takes
2045          * a while to de-assert INTx or in BE2 where occasionally
2046          * an interrupt may be raised even when EQ is unarmed.
2047          * If NAPI is already scheduled, then counting & notifying
2048          * events will orphan them.
2049          */
2050         if (napi_schedule_prep(&eqo->napi)) {
2051                 num_evts = events_get(eqo);
2052                 __napi_schedule(&eqo->napi);
2053                 if (num_evts)
2054                         eqo->spurious_intr = 0;
2055         }
2056         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2057
2058         /* Return IRQ_HANDLED only for the first spurious intr
2059          * after a valid intr to stop the kernel from branding
2060          * this irq as a bad one!
2061          */
2062         if (num_evts || eqo->spurious_intr++ == 0)
2063                 return IRQ_HANDLED;
2064         else
2065                 return IRQ_NONE;
2066 }
2067
2068 static irqreturn_t be_msix(int irq, void *dev)
2069 {
2070         struct be_eq_obj *eqo = dev;
2071
2072         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2073         napi_schedule(&eqo->napi);
2074         return IRQ_HANDLED;
2075 }
2076
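/* GRO is attempted only for error-free TCP frames */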
2077 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2078 {
2079         return rxcp->tcpf && !rxcp->err;
2080 }
2081
2082 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
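/* Poll up to 'budget' RX completions from this RX ring. Flush compls,
 * zero-length compls (partial DMA on Lancer B0) and wrong-port packets are
 * dropped; good frames go through GRO or the regular receive path. The CQ
 * is then re-armed and the RX ring replenished if it has drained below the
 * watermark.
 */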
2083                         int budget)
2084 {
2085         struct be_adapter *adapter = rxo->adapter;
2086         struct be_queue_info *rx_cq = &rxo->cq;
2087         struct be_rx_compl_info *rxcp;
2088         u32 work_done;
2089
2090         for (work_done = 0; work_done < budget; work_done++) {
2091                 rxcp = be_rx_compl_get(rxo);
2092                 if (!rxcp)
2093                         break;
2094
2095                 /* Is it a flush compl that has no data? */
2096                 if (unlikely(rxcp->num_rcvd == 0))
2097                         goto loop_continue;
2098
2099                 /* Discard a compl with partial DMA (Lancer B0) */
2100                 if (unlikely(!rxcp->pkt_size)) {
2101                         be_rx_compl_discard(rxo, rxcp);
2102                         goto loop_continue;
2103                 }
2104
2105                 /* On BE drop pkts that arrive due to imperfect filtering in
2106                  * promiscuous mode on some SKUs
2107                  */
2108                 if (unlikely(rxcp->port != adapter->port_num &&
2109                                 !lancer_chip(adapter))) {
2110                         be_rx_compl_discard(rxo, rxcp);
2111                         goto loop_continue;
2112                 }
2113
2114                 if (do_gro(rxcp))
2115                         be_rx_compl_process_gro(rxo, napi, rxcp);
2116                 else
2117                         be_rx_compl_process(rxo, rxcp);
2118 loop_continue:
2119                 be_rx_stats_update(rxo, rxcp);
2120         }
2121
2122         if (work_done) {
2123                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2124
2125                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2126                         be_post_rx_frags(rxo, GFP_ATOMIC);
2127         }
2128
2129         return work_done;
2130 }
2131
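/* Reap up to 'budget' TX completions from this TX ring, reclaim the
 * corresponding wrbs and wake the netdev subqueue if it was stopped for
 * lack of wrbs. Returns true when the CQ was fully drained within the
 * budget.
 */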
2132 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2133                           int budget, int idx)
2134 {
2135         struct be_eth_tx_compl *txcp;
2136         int num_wrbs = 0, work_done;
2137
2138         for (work_done = 0; work_done < budget; work_done++) {
2139                 txcp = be_tx_compl_get(&txo->cq);
2140                 if (!txcp)
2141                         break;
2142                 num_wrbs += be_tx_compl_process(adapter, txo,
2143                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2144                                         wrb_index, txcp));
2145         }
2146
2147         if (work_done) {
2148                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2149                 atomic_sub(num_wrbs, &txo->q.used);
2150
2151                 /* As Tx wrbs have been freed up, wake up netdev queue
2152                  * if it was stopped due to lack of tx wrbs.  */
2153                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2154                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2155                         netif_wake_subqueue(adapter->netdev, idx);
2156                 }
2157
2158                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2159                 tx_stats(txo)->tx_compl += work_done;
2160                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2161         }
2162         return (work_done < budget); /* Done */
2163 }
2164
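/* NAPI poll handler: services all TX and RX queues mapped to this EQ, plus
 * the MCC queue on the EQ that owns it. If the work done stays below the
 * budget, polling completes and the EQ is re-armed; otherwise events are
 * only counted and cleared so that polling continues.
 */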
2165 int be_poll(struct napi_struct *napi, int budget)
2166 {
2167         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2168         struct be_adapter *adapter = eqo->adapter;
2169         int max_work = 0, work, i, num_evts;
2170         bool tx_done;
2171
2172         num_evts = events_get(eqo);
2173
2174         /* Process all TXQs serviced by this EQ */
2175         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2176                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2177                                         eqo->tx_budget, i);
2178                 if (!tx_done)
2179                         max_work = budget;
2180         }
2181
2182         /* This loop will iterate twice for EQ0 in which
2183          * completions of the last RXQ (default one) are also processed.
2184          * For other EQs the loop iterates only once.
2185          */
2186         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2187                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2188                 max_work = max(work, max_work);
2189         }
2190
2191         if (is_mcc_eqo(eqo))
2192                 be_process_mcc(adapter);
2193
2194         if (max_work < budget) {
2195                 napi_complete(napi);
2196                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2197         } else {
2198                 /* As we'll continue in polling mode, count and clear events */
2199                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2200         }
2201         return max_work;
2202 }
2203
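/* Check for unrecoverable hardware errors: the SLIPORT status registers on
 * Lancer, or the UE (unrecoverable error) status registers, qualified by
 * their mask registers, on BE chips.
 */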
2204 void be_detect_error(struct be_adapter *adapter)
2205 {
2206         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2207         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2208         u32 i;
2209
2210         if (be_hw_error(adapter))
2211                 return;
2212
2213         if (lancer_chip(adapter)) {
2214                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2215                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2216                         sliport_err1 = ioread32(adapter->db +
2217                                         SLIPORT_ERROR1_OFFSET);
2218                         sliport_err2 = ioread32(adapter->db +
2219                                         SLIPORT_ERROR2_OFFSET);
2220                 }
2221         } else {
2222                 pci_read_config_dword(adapter->pdev,
2223                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2224                 pci_read_config_dword(adapter->pdev,
2225                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2226                 pci_read_config_dword(adapter->pdev,
2227                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2228                 pci_read_config_dword(adapter->pdev,
2229                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2230
2231                 ue_lo = (ue_lo & ~ue_lo_mask);
2232                 ue_hi = (ue_hi & ~ue_hi_mask);
2233         }
2234
2235         /* On certain platforms BE hardware can indicate spurious UEs.
2236          * A real UE anyway halts the h/w completely, so hw_error is not
2237          * set on UE detection; it is set only on SLIPORT errors.
2238          */
2239         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2240                 adapter->hw_error = true;
2241                 dev_err(&adapter->pdev->dev,
2242                         "Error detected in the card\n");
2243         }
2244
2245         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2246                 dev_err(&adapter->pdev->dev,
2247                         "ERR: sliport status 0x%x\n", sliport_status);
2248                 dev_err(&adapter->pdev->dev,
2249                         "ERR: sliport error1 0x%x\n", sliport_err1);
2250                 dev_err(&adapter->pdev->dev,
2251                         "ERR: sliport error2 0x%x\n", sliport_err2);
2252         }
2253
2254         if (ue_lo) {
2255                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2256                         if (ue_lo & 1)
2257                                 dev_err(&adapter->pdev->dev,
2258                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2259                 }
2260         }
2261
2262         if (ue_hi) {
2263                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2264                         if (ue_hi & 1)
2265                                 dev_err(&adapter->pdev->dev,
2266                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2267                 }
2268         }
2269
2270 }
2271
2272 static void be_msix_disable(struct be_adapter *adapter)
2273 {
2274         if (msix_enabled(adapter)) {
2275                 pci_disable_msix(adapter->pdev);
2276                 adapter->num_msix_vec = 0;
2277         }
2278 }
2279
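/* RSS queues are used only when the function is RSS-capable and is either
 * a Lancer function or a BE PF with no SR-IOV wanted; the count is further
 * capped by the kernel's default number of RSS queues.
 */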
2280 static uint be_num_rss_want(struct be_adapter *adapter)
2281 {
2282         u32 num = 0;
2283
2284         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2285             (lancer_chip(adapter) ||
2286              (!sriov_want(adapter) && be_physfn(adapter)))) {
2287                 num = adapter->max_rss_queues;
2288                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2289         }
2290         return num;
2291 }
2292
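/* Enable MSI-x with one vector per RSS queue (at least one vector), plus
 * extra vectors for RoCE when supported. If the full request fails, retry
 * with the reduced vector count reported by pci_enable_msix().
 */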
2293 static void be_msix_enable(struct be_adapter *adapter)
2294 {
2295 #define BE_MIN_MSIX_VECTORS             1
2296         int i, status, num_vec, num_roce_vec = 0;
2297         struct device *dev = &adapter->pdev->dev;
2298
2299         /* If RSS queues are not used, need a vec for default RX Q */
2300         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2301         if (be_roce_supported(adapter)) {
2302                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2303                                         (num_online_cpus() + 1));
2304                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2305                 num_vec += num_roce_vec;
2306                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2307         }
2308         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2309
2310         for (i = 0; i < num_vec; i++)
2311                 adapter->msix_entries[i].entry = i;
2312
2313         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2314         if (status == 0) {
2315                 goto done;
2316         } else if (status >= BE_MIN_MSIX_VECTORS) {
2317                 num_vec = status;
2318                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2319                                 num_vec) == 0)
2320                         goto done;
2321         }
2322
2323         dev_warn(dev, "MSIx enable failed\n");
2324         return;
2325 done:
2326         if (be_roce_supported(adapter)) {
2327                 if (num_vec > num_roce_vec) {
2328                         adapter->num_msix_vec = num_vec - num_roce_vec;
2329                         adapter->num_msix_roce_vec =
2330                                 num_vec - adapter->num_msix_vec;
2331                 } else {
2332                         adapter->num_msix_vec = num_vec;
2333                         adapter->num_msix_roce_vec = 0;
2334                 }
2335         } else
2336                 adapter->num_msix_vec = num_vec;
2337         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2338         return;
2339 }
2340
2341 static inline int be_msix_vec_get(struct be_adapter *adapter,
2342                                 struct be_eq_obj *eqo)
2343 {
2344         return adapter->msix_entries[eqo->idx].vector;
2345 }
2346
2347 static int be_msix_register(struct be_adapter *adapter)
2348 {
2349         struct net_device *netdev = adapter->netdev;
2350         struct be_eq_obj *eqo;
2351         int status, i, vec;
2352
2353         for_all_evt_queues(adapter, eqo, i) {
2354                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2355                 vec = be_msix_vec_get(adapter, eqo);
2356                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2357                 if (status)
2358                         goto err_msix;
2359         }
2360
2361         return 0;
2362 err_msix:
2363         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2364                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2365         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2366                 status);
2367         be_msix_disable(adapter);
2368         return status;
2369 }
2370
2371 static int be_irq_register(struct be_adapter *adapter)
2372 {
2373         struct net_device *netdev = adapter->netdev;
2374         int status;
2375
2376         if (msix_enabled(adapter)) {
2377                 status = be_msix_register(adapter);
2378                 if (status == 0)
2379                         goto done;
2380                 /* INTx is not supported for VF */
2381                 if (!be_physfn(adapter))
2382                         return status;
2383         }
2384
2385         /* INTx: only the first EQ is used */
2386         netdev->irq = adapter->pdev->irq;
2387         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2388                              &adapter->eq_obj[0]);
2389         if (status) {
2390                 dev_err(&adapter->pdev->dev,
2391                         "INTx request IRQ failed - err %d\n", status);
2392                 return status;
2393         }
2394 done:
2395         adapter->isr_registered = true;
2396         return 0;
2397 }
2398
2399 static void be_irq_unregister(struct be_adapter *adapter)
2400 {
2401         struct net_device *netdev = adapter->netdev;
2402         struct be_eq_obj *eqo;
2403         int i;
2404
2405         if (!adapter->isr_registered)
2406                 return;
2407
2408         /* INTx */
2409         if (!msix_enabled(adapter)) {
2410                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2411                 goto done;
2412         }
2413
2414         /* MSIx */
2415         for_all_evt_queues(adapter, eqo, i)
2416                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2417
2418 done:
2419         adapter->isr_registered = false;
2420 }
2421
2422 static void be_rx_qs_destroy(struct be_adapter *adapter)
2423 {
2424         struct be_queue_info *q;
2425         struct be_rx_obj *rxo;
2426         int i;
2427
2428         for_all_rx_queues(adapter, rxo, i) {
2429                 q = &rxo->q;
2430                 if (q->created) {
2431                         be_cmd_rxq_destroy(adapter, q);
2432                         /* After the rxq is invalidated, wait for a grace time
2433                          * of 1ms for all dma to end and the flush compl to
2434                          * arrive
2435                          */
2436                         mdelay(1);
2437                         be_rx_cq_clean(rxo);
2438                 }
2439                 be_queue_free(adapter, q);
2440         }
2441 }
2442
2443 static int be_close(struct net_device *netdev)
2444 {
2445         struct be_adapter *adapter = netdev_priv(netdev);
2446         struct be_eq_obj *eqo;
2447         int i;
2448
2449         be_roce_dev_close(adapter);
2450
2451         for_all_evt_queues(adapter, eqo, i)
2452                 napi_disable(&eqo->napi);
2453
2454         be_async_mcc_disable(adapter);
2455
2456         /* Wait for all pending tx completions to arrive so that
2457          * all tx skbs are freed.
2458          */
2459         be_tx_compl_clean(adapter);
2460
2461         be_rx_qs_destroy(adapter);
2462
2463         for_all_evt_queues(adapter, eqo, i) {
2464                 if (msix_enabled(adapter))
2465                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2466                 else
2467                         synchronize_irq(netdev->irq);
2468                 be_eq_clean(eqo);
2469         }
2470
2471         be_irq_unregister(adapter);
2472
2473         return 0;
2474 }
2475
2476 static int be_rx_qs_create(struct be_adapter *adapter)
2477 {
2478         struct be_rx_obj *rxo;
2479         int rc, i, j;
2480         u8 rsstable[128];
2481
2482         for_all_rx_queues(adapter, rxo, i) {
2483                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2484                                     sizeof(struct be_eth_rx_d));
2485                 if (rc)
2486                         return rc;
2487         }
2488
2489         /* The FW would like the default RXQ to be created first */
2490         rxo = default_rxo(adapter);
2491         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2492                                adapter->if_handle, false, &rxo->rss_id);
2493         if (rc)
2494                 return rc;
2495
2496         for_all_rss_queues(adapter, rxo, i) {
2497                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2498                                        rx_frag_size, adapter->if_handle,
2499                                        true, &rxo->rss_id);
2500                 if (rc)
2501                         return rc;
2502         }
2503
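        /* Populate the 128-entry RSS indirection table by striping the
         * RSS ring ids across it in round-robin order.
         */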
2504         if (be_multi_rxq(adapter)) {
2505                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2506                         for_all_rss_queues(adapter, rxo, i) {
2507                                 if ((j + i) >= 128)
2508                                         break;
2509                                 rsstable[j + i] = rxo->rss_id;
2510                         }
2511                 }
2512                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2513                 if (rc)
2514                         return rc;
2515         }
2516
2517         /* First time posting */
2518         for_all_rx_queues(adapter, rxo, i)
2519                 be_post_rx_frags(rxo, GFP_KERNEL);
2520         return 0;
2521 }
2522
2523 static int be_open(struct net_device *netdev)
2524 {
2525         struct be_adapter *adapter = netdev_priv(netdev);
2526         struct be_eq_obj *eqo;
2527         struct be_rx_obj *rxo;
2528         struct be_tx_obj *txo;
2529         u8 link_status;
2530         int status, i;
2531
2532         status = be_rx_qs_create(adapter);
2533         if (status)
2534                 goto err;
2535
2536         be_irq_register(adapter);
2537
2538         for_all_rx_queues(adapter, rxo, i)
2539                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2540
2541         for_all_tx_queues(adapter, txo, i)
2542                 be_cq_notify(adapter, txo->cq.id, true, 0);
2543
2544         be_async_mcc_enable(adapter);
2545
2546         for_all_evt_queues(adapter, eqo, i) {
2547                 napi_enable(&eqo->napi);
2548                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2549         }
2550
2551         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2552         if (!status)
2553                 be_link_status_update(adapter, link_status);
2554
2555         be_roce_dev_open(adapter);
2556         return 0;
2557 err:
2558         be_close(adapter->netdev);
2559         return -EIO;
2560 }
2561
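/* Enable or disable magic-packet Wake-on-LAN. A zeroed MAC in the FW cmd
 * disables WoL; PCI wakeup from D3hot/D3cold is toggled to match.
 */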
2562 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2563 {
2564         struct be_dma_mem cmd;
2565         int status = 0;
2566         u8 mac[ETH_ALEN];
2567
2568         memset(mac, 0, ETH_ALEN);
2569
2570         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2571         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2572                                     GFP_KERNEL | __GFP_ZERO);
2573         if (cmd.va == NULL)
2574                 return -1;
2575
2576         if (enable) {
2577                 status = pci_write_config_dword(adapter->pdev,
2578                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2579                 if (status) {
2580                         dev_err(&adapter->pdev->dev,
2581                                 "Could not enable Wake-on-lan\n");
2582                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2583                                           cmd.dma);
2584                         return status;
2585                 }
2586                 status = be_cmd_enable_magic_wol(adapter,
2587                                 adapter->netdev->dev_addr, &cmd);
2588                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2589                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2590         } else {
2591                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2592                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2593                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2594         }
2595
2596         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2597         return status;
2598 }
2599
2600 /*
2601  * Generate a seed MAC address from the PF MAC Address using jhash.
2602  * MAC addresses for VFs are assigned incrementally starting from the seed.
2603  * These addresses are programmed in the ASIC by the PF and the VF driver
2604  * queries for the MAC address during its probe.
2605  */
2606 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2607 {
2608         u32 vf;
2609         int status = 0;
2610         u8 mac[ETH_ALEN];
2611         struct be_vf_cfg *vf_cfg;
2612
2613         be_vf_eth_addr_generate(adapter, mac);
2614
2615         for_all_vfs(adapter, vf_cfg, vf) {
2616                 if (lancer_chip(adapter)) {
2617                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2618                 } else {
2619                         status = be_cmd_pmac_add(adapter, mac,
2620                                                  vf_cfg->if_handle,
2621                                                  &vf_cfg->pmac_id, vf + 1);
2622                 }
2623
2624                 if (status)
2625                         dev_err(&adapter->pdev->dev,
2626                         "Mac address assignment failed for VF %d\n", vf);
2627                 else
2628                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2629
2630                 mac[5] += 1;
2631         }
2632         return status;
2633 }
2634
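/* When VFs are already enabled (e.g. after a PF driver reload), read back
 * each VF's currently programmed MAC from the FW instead of generating
 * fresh ones.
 */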
2635 static int be_vfs_mac_query(struct be_adapter *adapter)
2636 {
2637         int status, vf;
2638         u8 mac[ETH_ALEN];
2639         struct be_vf_cfg *vf_cfg;
2640         bool active;
2641
2642         for_all_vfs(adapter, vf_cfg, vf) {
2643                 be_cmd_get_mac_from_list(adapter, mac, &active,
2644                                          &vf_cfg->pmac_id, 0);
2645
2646                 status = be_cmd_mac_addr_query(adapter, mac, false,
2647                                                vf_cfg->if_handle, 0);
2648                 if (status)
2649                         return status;
2650                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2651         }
2652         return 0;
2653 }
2654
2655 static void be_vf_clear(struct be_adapter *adapter)
2656 {
2657         struct be_vf_cfg *vf_cfg;
2658         u32 vf;
2659
2660         if (be_find_vfs(adapter, ASSIGNED)) {
2661                 dev_warn(&adapter->pdev->dev,
2662                          "VFs are assigned to VMs: not disabling VFs\n");
2663                 goto done;
2664         }
2665
2666         for_all_vfs(adapter, vf_cfg, vf) {
2667                 if (lancer_chip(adapter))
2668                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2669                 else
2670                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2671                                         vf_cfg->pmac_id, vf + 1);
2672
2673                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2674         }
2675         pci_disable_sriov(adapter->pdev);
2676 done:
2677         kfree(adapter->vf_cfg);
2678         adapter->num_vfs = 0;
2679 }
2680
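/* Undo be_setup(): stop the worker, tear down VFs, delete the programmed
 * unicast MACs, destroy the interface and all queues, free the pmac_id
 * table and release the MSI-x vectors.
 */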
2681 static int be_clear(struct be_adapter *adapter)
2682 {
2683         int i = 1;
2684
2685         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2686                 cancel_delayed_work_sync(&adapter->work);
2687                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2688         }
2689
2690         if (sriov_enabled(adapter))
2691                 be_vf_clear(adapter);
2692
2693         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2694                 be_cmd_pmac_del(adapter, adapter->if_handle,
2695                         adapter->pmac_id[i], 0);
2696
2697         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2698
2699         be_mcc_queues_destroy(adapter);
2700         be_rx_cqs_destroy(adapter);
2701         be_tx_queues_destroy(adapter);
2702         be_evt_queues_destroy(adapter);
2703
2704         kfree(adapter->pmac_id);
2705         adapter->pmac_id = NULL;
2706
2707         be_msix_disable(adapter);
2708         return 0;
2709 }
2710
2711 static int be_vfs_if_create(struct be_adapter *adapter)
2712 {
2713         struct be_vf_cfg *vf_cfg;
2714         u32 cap_flags, en_flags, vf;
2715         int status = 0;
2716
2717         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2718                     BE_IF_FLAGS_MULTICAST;
2719
2720         for_all_vfs(adapter, vf_cfg, vf) {
2721                 if (!BE3_chip(adapter))
2722                         be_cmd_get_profile_config(adapter, &cap_flags,
2723                                                   NULL, vf + 1);
2724
2725                 /* If a FW profile exists, then cap_flags are updated */
2726                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2727                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2728                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2729                                           &vf_cfg->if_handle, vf + 1);
2730                 if (status)
2731                         goto err;
2732         }
2733 err:
2734         return status;
2735 }
2736
2737 static int be_vf_setup_init(struct be_adapter *adapter)
2738 {
2739         struct be_vf_cfg *vf_cfg;
2740         int vf;
2741
2742         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2743                                   GFP_KERNEL);
2744         if (!adapter->vf_cfg)
2745                 return -ENOMEM;
2746
2747         for_all_vfs(adapter, vf_cfg, vf) {
2748                 vf_cfg->if_handle = -1;
2749                 vf_cfg->pmac_id = -1;
2750         }
2751         return 0;
2752 }
2753
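/* Enable SR-IOV (unless the VFs are already enabled, e.g. after a PF
 * driver reload) and provision each VF with an interface, a MAC address,
 * a TX rate, its link status and the default VLAN from the switch config.
 * SR-IOV enable failure is not fatal; the PF continues without VFs.
 */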
2754 static int be_vf_setup(struct be_adapter *adapter)
2755 {
2756         struct be_vf_cfg *vf_cfg;
2757         u16 def_vlan, lnk_speed;
2758         int status, old_vfs, vf;
2759         struct device *dev = &adapter->pdev->dev;
2760
2761         old_vfs = be_find_vfs(adapter, ENABLED);
2762         if (old_vfs) {
2763                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2764                 if (old_vfs != num_vfs)
2765                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2766                 adapter->num_vfs = old_vfs;
2767         } else {
2768                 if (num_vfs > adapter->dev_num_vfs)
2769                         dev_info(dev, "Device supports %d VFs and not %d\n",
2770                                  adapter->dev_num_vfs, num_vfs);
2771                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2772
2773                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2774                 if (status) {
2775                         dev_err(dev, "SRIOV enable failed\n");
2776                         adapter->num_vfs = 0;
2777                         return 0;
2778                 }
2779         }
2780
2781         status = be_vf_setup_init(adapter);
2782         if (status)
2783                 goto err;
2784
2785         if (old_vfs) {
2786                 for_all_vfs(adapter, vf_cfg, vf) {
2787                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2788                         if (status)
2789                                 goto err;
2790                 }
2791         } else {
2792                 status = be_vfs_if_create(adapter);
2793                 if (status)
2794                         goto err;
2795         }
2796
2797         if (old_vfs) {
2798                 status = be_vfs_mac_query(adapter);
2799                 if (status)
2800                         goto err;
2801         } else {
2802                 status = be_vf_eth_addr_config(adapter);
2803                 if (status)
2804                         goto err;
2805         }
2806
2807         for_all_vfs(adapter, vf_cfg, vf) {
2808                 /* BE3 FW, by default, caps VF TX-rate to 100mbps.
2809                  * Allow full available bandwidth
2810                  */
2811                 if (BE3_chip(adapter) && !old_vfs)
2812                         be_cmd_set_qos(adapter, 1000, vf+1);
2813
2814                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2815                                                   NULL, vf + 1);
2816                 if (!status)
2817                         vf_cfg->tx_rate = lnk_speed;
2818
2819                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2820                                                vf + 1, vf_cfg->if_handle);
2821                 if (status)
2822                         goto err;
2823                 vf_cfg->def_vid = def_vlan;
2824
2825                 be_cmd_enable_vf(adapter, vf + 1);
2826         }
2827         return 0;
2828 err:
2829         dev_err(dev, "VF setup failed\n");
2830         be_vf_clear(adapter);
2831         return status;
2832 }
2833
2834 static void be_setup_init(struct be_adapter *adapter)
2835 {
2836         adapter->vlan_prio_bmap = 0xff;
2837         adapter->phy.link_speed = -1;
2838         adapter->if_handle = -1;
2839         adapter->be3_native = false;
2840         adapter->promiscuous = false;
2841         if (be_physfn(adapter))
2842                 adapter->cmd_privileges = MAX_PRIVILEGES;
2843         else
2844                 adapter->cmd_privileges = MIN_PRIVILEGES;
2845 }
2846
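/* Determine the MAC address the interface should use: the netdev's address
 * when the permanent MAC is valid, the FW-provisioned MAC-list entry on
 * Lancer, the permanent MAC on a BE3 PF, or the PF-assigned soft MAC on a
 * BE3 VF.
 */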
2847 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2848                            bool *active_mac, u32 *pmac_id)
2849 {
2850         int status = 0;
2851
2852         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2853                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2854                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2855                         *active_mac = true;
2856                 else
2857                         *active_mac = false;
2858
2859                 return status;
2860         }
2861
2862         if (lancer_chip(adapter)) {
2863                 status = be_cmd_get_mac_from_list(adapter, mac,
2864                                                   active_mac, pmac_id, 0);
2865                 if (*active_mac) {
2866                         status = be_cmd_mac_addr_query(adapter, mac, false,
2867                                                        if_handle, *pmac_id);
2868                 }
2869         } else if (be_physfn(adapter)) {
2870                 /* For a BE3 PF, get the permanent MAC */
2871                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2872                 *active_mac = false;
2873         } else {
2874                 /* For a BE3 VF, get the soft MAC assigned by the PF */
2875                 status = be_cmd_mac_addr_query(adapter, mac, false,
2876                                                if_handle, 0);
2877                 *active_mac = true;
2878         }
2879         return status;
2880 }
2881
2882 static void be_get_resources(struct be_adapter *adapter)
2883 {
2884         u16 dev_num_vfs;
2885         int pos, status;
2886         bool profile_present = false;
2887         u16 txq_count = 0;
2888
2889         if (!BEx_chip(adapter)) {
2890                 status = be_cmd_get_func_config(adapter);
2891                 if (!status)
2892                         profile_present = true;
2893         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
2894                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
2895         }
2896
2897         if (profile_present) {
2898                 /* Sanity fixes for Lancer */
2899                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2900                                               BE_UC_PMAC_COUNT);
2901                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2902                                            BE_NUM_VLANS_SUPPORTED);
2903                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2904                                                BE_MAX_MC);
2905                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2906                                                MAX_TX_QS);
2907                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2908                                                 BE3_MAX_RSS_QS);
2909                 adapter->max_event_queues = min_t(u16,
2910                                                   adapter->max_event_queues,
2911                                                   BE3_MAX_RSS_QS);
2912
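                /* If every RX ring would otherwise be an RSS ring, reserve
                 * one ring for the default (non-RSS) RX queue.
                 */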
2913                 if (adapter->max_rss_queues &&
2914                     adapter->max_rss_queues == adapter->max_rx_queues)
2915                         adapter->max_rss_queues -= 1;
2916
2917                 if (adapter->max_event_queues < adapter->max_rss_queues)
2918                         adapter->max_rss_queues = adapter->max_event_queues;
2919
2920         } else {
2921                 if (be_physfn(adapter))
2922                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2923                 else
2924                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2925
2926                 if (adapter->function_mode & FLEX10_MODE)
2927                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
2928                 else
2929                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2930
2931                 adapter->max_mcast_mac = BE_MAX_MC;
2932                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
2933                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2934                                                MAX_TX_QS);
2935                 adapter->max_rss_queues = (adapter->be3_native) ?
2936                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2937                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2938
2939                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2940                                         BE_IF_FLAGS_BROADCAST |
2941                                         BE_IF_FLAGS_MULTICAST |
2942                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2943                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2944                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2945                                         BE_IF_FLAGS_PROMISCUOUS;
2946
2947                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2948                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2949         }
2950
2951         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2952         if (pos) {
2953                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2954                                      &dev_num_vfs);
2955                 if (BE3_chip(adapter))
2956                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2957                 adapter->dev_num_vfs = dev_num_vfs;
2958         }
2959 }
2960
2961 /* Routine to query per function resource limits */
2962 static int be_get_config(struct be_adapter *adapter)
2963 {
2964         int status;
2965
2966         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2967                                      &adapter->function_mode,
2968                                      &adapter->function_caps,
2969                                      &adapter->asic_rev);
2970         if (status)
2971                 goto err;
2972
2973         be_get_resources(adapter);
2974
2975         /* The primary MAC needs one extra pmac_id entry */
2976         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2977                                    sizeof(u32), GFP_KERNEL);
2978         if (!adapter->pmac_id) {
2979                 status = -ENOMEM;
2980                 goto err;
2981         }
2982
2983 err:
2984         return status;
2985 }
2986
2987 static int be_setup(struct be_adapter *adapter)
2988 {
2989         struct device *dev = &adapter->pdev->dev;
2990         u32 en_flags;
2991         u32 tx_fc, rx_fc;
2992         int status;
2993         u8 mac[ETH_ALEN];
2994         bool active_mac;
2995
2996         be_setup_init(adapter);
2997
2998         if (!lancer_chip(adapter))
2999                 be_cmd_req_native_mode(adapter);
3000
3001         status = be_get_config(adapter);
3002         if (status)
3003                 goto err;
3004
3005         be_msix_enable(adapter);
3006
3007         status = be_evt_queues_create(adapter);
3008         if (status)
3009                 goto err;
3010
3011         status = be_tx_cqs_create(adapter);
3012         if (status)
3013                 goto err;
3014
3015         status = be_rx_cqs_create(adapter);
3016         if (status)
3017                 goto err;
3018
3019         status = be_mcc_queues_create(adapter);
3020         if (status)
3021                 goto err;
3022
3023         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3024         /* In UMC mode the FW does not return the right privileges.
3025          * Override them with privileges equivalent to a PF's.
3026          */
3027         if (be_is_mc(adapter))
3028                 adapter->cmd_privileges = MAX_PRIVILEGES;
3029
3030         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3031                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3032
3033         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3034                 en_flags |= BE_IF_FLAGS_RSS;
3035
3036         en_flags &= adapter->if_cap_flags;
3037
3038         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3039                                   &adapter->if_handle, 0);
3040         if (status != 0)
3041                 goto err;
3042
3043         memset(mac, 0, ETH_ALEN);
3044         active_mac = false;
3045         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3046                                  &active_mac, &adapter->pmac_id[0]);
3047         if (status != 0)
3048                 goto err;
3049
3050         if (!active_mac) {
3051                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3052                                          &adapter->pmac_id[0], 0);
3053                 if (status != 0)
3054                         goto err;
3055         }
3056
3057         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3058                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3059                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3060         }
3061
3062         status = be_tx_qs_create(adapter);
3063         if (status)
3064                 goto err;
3065
3066         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3067
3068         if (adapter->vlans_added)
3069                 be_vid_config(adapter);
3070
3071         be_set_rx_mode(adapter->netdev);
3072
3073         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3074
3075         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3076                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3077                                         adapter->rx_fc);
3078
3079         if (be_physfn(adapter) && num_vfs) {
3080                 if (adapter->dev_num_vfs)
3081                         be_vf_setup(adapter);
3082                 else
3083                         dev_warn(dev, "device doesn't support SRIOV\n");
3084         }
3085
3086         status = be_cmd_get_phy_info(adapter);
3087         if (!status && be_pause_supported(adapter))
3088                 adapter->phy.fc_autoneg = 1;
3089
3090         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3091         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3092         return 0;
3093 err:
3094         be_clear(adapter);
3095         return status;
3096 }
3097
3098 #ifdef CONFIG_NET_POLL_CONTROLLER
3099 static void be_netpoll(struct net_device *netdev)
3100 {
3101         struct be_adapter *adapter = netdev_priv(netdev);
3102         struct be_eq_obj *eqo;
3103         int i;
3104
3105         for_all_evt_queues(adapter, eqo, i) {
3106                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3107                 napi_schedule(&eqo->napi);
3108         }
3111 }
3112 #endif
3113
3114 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3115 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3116
3117 static bool be_flash_redboot(struct be_adapter *adapter,
3118                         const u8 *p, u32 img_start, int image_size,
3119                         int hdr_size)
3120 {
3121         u32 crc_offset;
3122         u8 flashed_crc[4];
3123         int status;
3124
3125         crc_offset = hdr_size + img_start + image_size - 4;
3126
3127         p += crc_offset;
3128
3129         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3130                         (image_size - 4));
3131         if (status) {
3132                 dev_err(&adapter->pdev->dev,
3133                         "could not get CRC from flash; not flashing redboot\n");
3134                 return false;
3135         }
3136
3137         /* update redboot only if the CRC does not match */
3138         return memcmp(flashed_crc, p, 4) != 0;
3142 }
3143
3144 static bool phy_flashing_required(struct be_adapter *adapter)
3145 {
3146         return (adapter->phy.phy_type == TN_8022 &&
3147                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3148 }
3149
3150 static bool is_comp_in_ufi(struct be_adapter *adapter,
3151                            struct flash_section_info *fsec, int type)
3152 {
3153         int i = 0, img_type = 0;
3154         struct flash_section_info_g2 *fsec_g2 = NULL;
3155
3156         if (BE2_chip(adapter))
3157                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3158
3159         for (i = 0; i < MAX_FLASH_COMP; i++) {
3160                 if (fsec_g2)
3161                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3162                 else
3163                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3164
3165                 if (img_type == type)
3166                         return true;
3167         }
3168         return false;
3170 }
3171
3172 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3173                                          int header_size,
3174                                          const struct firmware *fw)
3175 {
3176         struct flash_section_info *fsec = NULL;
3177         const u8 *p = fw->data;
3178
3179         p += header_size;
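        /* The flash-directory cookie is expected on a 32-byte boundary;
         * scan the UFI in 32-byte steps looking for it.
         */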
3180         while (p < (fw->data + fw->size)) {
3181                 fsec = (struct flash_section_info *)p;
3182                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3183                         return fsec;
3184                 p += 32;
3185         }
3186         return NULL;
3187 }
3188
3189 static int be_flash(struct be_adapter *adapter, const u8 *img,
3190                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3191 {
3192         u32 total_bytes = 0, flash_op, num_bytes = 0;
3193         int status = 0;
3194         struct be_cmd_write_flashrom *req = flash_cmd->va;
3195
3196         total_bytes = img_size;
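        /* The image is written in 32KB chunks: intermediate chunks are
         * issued with a SAVE op; only the final chunk triggers the actual
         * FLASH (commit) op.
         */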
3197         while (total_bytes) {
3198                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3199
3200                 total_bytes -= num_bytes;
3201
3202                 if (!total_bytes) {
3203                         if (optype == OPTYPE_PHY_FW)
3204                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3205                         else
3206                                 flash_op = FLASHROM_OPER_FLASH;
3207                 } else {
3208                         if (optype == OPTYPE_PHY_FW)
3209                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3210                         else
3211                                 flash_op = FLASHROM_OPER_SAVE;
3212                 }
3213
3214                 memcpy(req->data_buf, img, num_bytes);
3215                 img += num_bytes;
3216                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3217                                                 flash_op, num_bytes);
3218                 if (status) {
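                        /* Older FW may not support PHY FW flashing;
                         * ILLEGAL_IOCTL_REQ for the PHY optype is treated
                         * as "unsupported" rather than as a fatal error.
                         */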
3219                         if (status == ILLEGAL_IOCTL_REQ &&
3220                             optype == OPTYPE_PHY_FW)
3221                                 break;
3222                         dev_err(&adapter->pdev->dev,
3223                                 "cmd to write to flash rom failed.\n");
3224                         return status;
3225                 }
3226         }
3227         return 0;
3228 }
3229
3230 /* For BE2, BE3 and BE3-R */
3231 static int be_flash_BEx(struct be_adapter *adapter,
3232                          const struct firmware *fw,
3233                          struct be_dma_mem *flash_cmd,
3234                          int num_of_images)
3236 {
3237         int status = 0, i, filehdr_size = 0;
3238         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3239         const u8 *p = fw->data;
3240         const struct flash_comp *pflashcomp;
3241         int num_comp, redboot;
3242         struct flash_section_info *fsec = NULL;
3243
3244         static const struct flash_comp gen3_flash_types[] = {
3245                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3246                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3247                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3248                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3249                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3250                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3251                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3252                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3253                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3254                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3255                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3256                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3257                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3258                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3259                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3260                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3261                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3262                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3263                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3264                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3265         };
3266
3267         static const struct flash_comp gen2_flash_types[] = {
3268                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3269                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3270                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3271                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3272                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3273                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3274                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3275                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3276                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3277                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3278                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3279                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3280                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3281                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3282                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3283                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3284         };
3285
3286         if (BE3_chip(adapter)) {
3287                 pflashcomp = gen3_flash_types;
3288                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3289                 num_comp = ARRAY_SIZE(gen3_flash_types);
3290         } else {
3291                 pflashcomp = gen2_flash_types;
3292                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3293                 num_comp = ARRAY_SIZE(gen2_flash_types);
3294         }
3295
3296         /* Get flash section info */
3297         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3298         if (!fsec) {
3299                 dev_err(&adapter->pdev->dev,
3300                         "Invalid cookie. UFI corrupted?\n");
3301                 return -1;
3302         }
3303         for (i = 0; i < num_comp; i++) {
3304                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3305                         continue;
3306
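                /* NCSI FW sections are flashed only when the FW already on
                 * the card is version 3.102.148.0 or newer.
                 */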
3307                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3308                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3309                         continue;
3310
3311                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3312                     !phy_flashing_required(adapter))
3313                         continue;
3314
3315                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3316                         redboot = be_flash_redboot(adapter, fw->data,
3317                                 pflashcomp[i].offset, pflashcomp[i].size,
3318                                 filehdr_size + img_hdrs_size);
3319                         if (!redboot)
3320                                 continue;
3321                 }
3322
3323                 p = fw->data;
3324                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3325                 if (p + pflashcomp[i].size > fw->data + fw->size)
3326                         return -1;
3327
3328                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3329                                         pflashcomp[i].size);
3330                 if (status) {
3331                         dev_err(&adapter->pdev->dev,
3332                                 "Flashing section type %d failed.\n",
3333                                 pflashcomp[i].img_type);
3334                         return status;
3335                 }
3336         }
3337         return 0;
3338 }
3339
3340 static int be_flash_skyhawk(struct be_adapter *adapter,
3341                 const struct firmware *fw,
3342                 struct be_dma_mem *flash_cmd, int num_of_images)
3343 {
3344         int status = 0, i, filehdr_size = 0;
3345         int img_offset, img_size, img_optype, redboot;
3346         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3347         const u8 *p = fw->data;
3348         struct flash_section_info *fsec = NULL;
3349
3350         filehdr_size = sizeof(struct flash_file_hdr_g3);
3351         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3352         if (!fsec) {
3353                 dev_err(&adapter->pdev->dev,
3354                         "Invalid cookie. UFI corrupted?\n");
3355                 return -1;
3356         }
3357
3358         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3359                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3360                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3361
3362                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3363                 case IMAGE_FIRMWARE_iSCSI:
3364                         img_optype = OPTYPE_ISCSI_ACTIVE;
3365                         break;
3366                 case IMAGE_BOOT_CODE:
3367                         img_optype = OPTYPE_REDBOOT;
3368                         break;
3369                 case IMAGE_OPTION_ROM_ISCSI:
3370                         img_optype = OPTYPE_BIOS;
3371                         break;
3372                 case IMAGE_OPTION_ROM_PXE:
3373                         img_optype = OPTYPE_PXE_BIOS;
3374                         break;
3375                 case IMAGE_OPTION_ROM_FCoE:
3376                         img_optype = OPTYPE_FCOE_BIOS;
3377                         break;
3378                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3379                         img_optype = OPTYPE_ISCSI_BACKUP;
3380                         break;
3381                 case IMAGE_NCSI:
3382                         img_optype = OPTYPE_NCSI_FW;
3383                         break;
3384                 default:
3385                         continue;
3386                 }
3387
3388                 if (img_optype == OPTYPE_REDBOOT) {
3389                         redboot = be_flash_redboot(adapter, fw->data,
3390                                         img_offset, img_size,
3391                                         filehdr_size + img_hdrs_size);
3392                         if (!redboot)
3393                                 continue;
3394                 }
3395
3396                 p = fw->data;
3397                 p += filehdr_size + img_offset + img_hdrs_size;
3398                 if (p + img_size > fw->data + fw->size)
3399                         return -1;
3400
3401                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3402                 if (status) {
3403                         dev_err(&adapter->pdev->dev,
3404                                 "Flashing section type %d failed.\n",
3405                                 le32_to_cpu(fsec->fsec_entry[i].type));
3406                         return status;
3407                 }
3408         }
3409         return 0;
3410 }
3411
3412 static int lancer_wait_idle(struct be_adapter *adapter)
3413 {
3414 #define SLIPORT_IDLE_TIMEOUT 30 /* seconds */
3415         u32 reg_val;
3416         int status = 0, i;
3417
3418         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3419                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3420                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3421                         break;
3422
3423                 ssleep(1);
3424         }
3425
3426         if (i == SLIPORT_IDLE_TIMEOUT)
3427                 status = -1;
3428
3429         return status;
3430 }
3431
3432 static int lancer_fw_reset(struct be_adapter *adapter)
3433 {
3434         int status = 0;
3435
3436         status = lancer_wait_idle(adapter);
3437         if (status)
3438                 return status;
3439
3440         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3441                   PHYSDEV_CONTROL_OFFSET);
3442
3443         return status;
3444 }
3445
3446 static int lancer_fw_download(struct be_adapter *adapter,
3447                                 const struct firmware *fw)
3448 {
3449 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3450 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3451         struct be_dma_mem flash_cmd;
3452         const u8 *data_ptr = NULL;
3453         u8 *dest_image_ptr = NULL;
3454         size_t image_size = 0;
3455         u32 chunk_size = 0;
3456         u32 data_written = 0;
3457         u32 offset = 0;
3458         int status = 0;
3459         u8 add_status = 0;
3460         u8 change_status;
3461
3462         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3463                 dev_err(&adapter->pdev->dev,
3464                         "FW image not properly aligned. "
3465                         "Length must be 4-byte aligned.\n");
3466                 status = -EINVAL;
3467                 goto lancer_fw_exit;
3468         }
3469
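        /* The DMA buffer holds the write_object request header followed by
         * one 32KB chunk of image data.
         */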
3470         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3471                                 + LANCER_FW_DOWNLOAD_CHUNK;
3472         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3473                                           &flash_cmd.dma, GFP_KERNEL);
3474         if (!flash_cmd.va) {
3475                 status = -ENOMEM;
3476                 goto lancer_fw_exit;
3477         }
3478
3479         dest_image_ptr = flash_cmd.va +
3480                                 sizeof(struct lancer_cmd_req_write_object);
3481         image_size = fw->size;
3482         data_ptr = fw->data;
3483
3484         while (image_size) {
3485                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3486
3487                 /* Copy the image chunk content. */
3488                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3489
3490                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3491                                                  chunk_size, offset,
3492                                                  LANCER_FW_DOWNLOAD_LOCATION,
3493                                                  &data_written, &change_status,
3494                                                  &add_status);
3495                 if (status)
3496                         break;
3497
3498                 offset += data_written;
3499                 data_ptr += data_written;
3500                 image_size -= data_written;
3501         }
3502
3503         if (!status) {
3504                 /* Commit the FW written */
3505                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3506                                                  0, offset,
3507                                                  LANCER_FW_DOWNLOAD_LOCATION,
3508                                                  &data_written, &change_status,
3509                                                  &add_status);
3510         }
3511
3512         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3513                                 flash_cmd.dma);
3514         if (status) {
3515                 dev_err(&adapter->pdev->dev,
3516                         "Firmware load error. "
3517                         "Status code: 0x%x Additional Status: 0x%x\n",
3518                         status, add_status);
3519                 goto lancer_fw_exit;
3520         }
3521
3522         if (change_status == LANCER_FW_RESET_NEEDED) {
3523                 status = lancer_fw_reset(adapter);
3524                 if (status) {
3525                         dev_err(&adapter->pdev->dev,
3526                                 "Adapter busy; FW reset was not performed.\n"
3527                                 "New FW will not be active.\n");
3528                         goto lancer_fw_exit;
3529                 }
3530         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3531                 dev_err(&adapter->pdev->dev,
3532                         "System reboot required for new FW to be active\n");
3534         }
3535
3536         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3537 lancer_fw_exit:
3538         return status;
3539 }
3540
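/* UFI image-file flavors: TYPE2 flashes on BE2, TYPE3 on BE3,
 * TYPE3R on BE3-R (asic_type_rev 0x10) and TYPE4 on Skyhawk.
 */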
3541 #define UFI_TYPE2               2
3542 #define UFI_TYPE3               3
3543 #define UFI_TYPE3R              10
3544 #define UFI_TYPE4               4
3545 static int be_get_ufi_type(struct be_adapter *adapter,
3546                            struct flash_file_hdr_g3 *fhdr)
3547 {
3548         if (fhdr == NULL)
3549                 goto be_get_ufi_exit;
3550
3551         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3552                 return UFI_TYPE4;
3553         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3554                 if (fhdr->asic_type_rev == 0x10)
3555                         return UFI_TYPE3R;
3556                 else
3557                         return UFI_TYPE3;
3558         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3559                 return UFI_TYPE2;
3560
3561 be_get_ufi_exit:
3562         dev_err(&adapter->pdev->dev,
3563                 "UFI and Interface are not compatible for flashing\n");
3564         return -1;
3565 }
3566
3567 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3568 {
3569         struct flash_file_hdr_g3 *fhdr3;
3570         struct image_hdr *img_hdr_ptr = NULL;
3571         struct be_dma_mem flash_cmd;
3572         const u8 *p;
3573         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3574
3575         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3576         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3577                                           &flash_cmd.dma, GFP_KERNEL);
3578         if (!flash_cmd.va) {
3579                 status = -ENOMEM;
3580                 goto be_fw_exit;
3581         }
3582
3583         p = fw->data;
3584         fhdr3 = (struct flash_file_hdr_g3 *)p;
3585
3586         ufi_type = be_get_ufi_type(adapter, fhdr3);
3587
3588         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3589         for (i = 0; i < num_imgs; i++) {
3590                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3591                                 (sizeof(struct flash_file_hdr_g3) +
3592                                  i * sizeof(struct image_hdr)));
3593                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3594                         switch (ufi_type) {
3595                         case UFI_TYPE4:
3596                                 status = be_flash_skyhawk(adapter, fw,
3597                                                         &flash_cmd, num_imgs);
3598                                 break;
3599                         case UFI_TYPE3R:
3600                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3601                                                       num_imgs);
3602                                 break;
3603                         case UFI_TYPE3:
3604                                 /* Do not flash this UFI on BE3-R cards */
3605                                 if (adapter->asic_rev < 0x10)
3606                                         status = be_flash_BEx(adapter, fw,
3607                                                               &flash_cmd,
3608                                                               num_imgs);
3609                                 else {
3610                                         status = -1;
3611                                         dev_err(&adapter->pdev->dev,
3612                                                 "Can't load BE3 UFI on BE3R\n");
3613                                 }
3614                         }
3615                 }
3616         }
3617
3618         if (ufi_type == UFI_TYPE2)
3619                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3620         else if (ufi_type == -1)
3621                 status = -1;
3622
3623         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3624                           flash_cmd.dma);
3625         if (status) {
3626                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3627                 goto be_fw_exit;
3628         }
3629
3630         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3631
3632 be_fw_exit:
3633         return status;
3634 }
3635
3636 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3637 {
3638         const struct firmware *fw;
3639         int status;
3640
3641         if (!netif_running(adapter->netdev)) {
3642                 dev_err(&adapter->pdev->dev,
3643                         "Firmware load not allowed (interface is down)\n");
3644                 return -1;
3645         }
3646
3647         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3648         if (status)
3649                 goto fw_exit;
3650
3651         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3652
3653         if (lancer_chip(adapter))
3654                 status = lancer_fw_download(adapter, fw);
3655         else
3656                 status = be_fw_download(adapter, fw);
3657
3658 fw_exit:
3659         release_firmware(fw);
3660         return status;
3661 }
3662
3663 static const struct net_device_ops be_netdev_ops = {
3664         .ndo_open               = be_open,
3665         .ndo_stop               = be_close,
3666         .ndo_start_xmit         = be_xmit,
3667         .ndo_set_rx_mode        = be_set_rx_mode,
3668         .ndo_set_mac_address    = be_mac_addr_set,
3669         .ndo_change_mtu         = be_change_mtu,
3670         .ndo_get_stats64        = be_get_stats64,
3671         .ndo_validate_addr      = eth_validate_addr,
3672         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3673         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3674         .ndo_set_vf_mac         = be_set_vf_mac,
3675         .ndo_set_vf_vlan        = be_set_vf_vlan,
3676         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3677         .ndo_get_vf_config      = be_get_vf_config,
3678 #ifdef CONFIG_NET_POLL_CONTROLLER
3679         .ndo_poll_controller    = be_netpoll,
3680 #endif
3681 };
3682
3683 static void be_netdev_init(struct net_device *netdev)
3684 {
3685         struct be_adapter *adapter = netdev_priv(netdev);
3686         struct be_eq_obj *eqo;
3687         int i;
3688
3689         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3690                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3691                 NETIF_F_HW_VLAN_CTAG_TX;
3692         if (be_multi_rxq(adapter))
3693                 netdev->hw_features |= NETIF_F_RXHASH;
3694
3695         netdev->features |= netdev->hw_features |
3696                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3697
3698         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3699                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3700
3701         netdev->priv_flags |= IFF_UNICAST_FLT;
3702
3703         netdev->flags |= IFF_MULTICAST;
3704
3705         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3706
3707         netdev->netdev_ops = &be_netdev_ops;
3708
3709         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3710
3711         for_all_evt_queues(adapter, eqo, i)
3712                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3713 }
3714
3715 static void be_unmap_pci_bars(struct be_adapter *adapter)
3716 {
3717         if (adapter->csr)
3718                 pci_iounmap(adapter->pdev, adapter->csr);
3719         if (adapter->db)
3720                 pci_iounmap(adapter->pdev, adapter->db);
3721 }
3722
3723 static int db_bar(struct be_adapter *adapter)
3724 {
3725         if (lancer_chip(adapter) || !be_physfn(adapter))
3726                 return 0;
3727         else
3728                 return 4;
3729 }
3730
3731 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3732 {
3733         if (skyhawk_chip(adapter)) {
3734                 adapter->roce_db.size = 4096;
3735                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3736                                                               db_bar(adapter));
3737                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3738                                                                db_bar(adapter));
3739         }
3740         return 0;
3741 }
3742
3743 static int be_map_pci_bars(struct be_adapter *adapter)
3744 {
3745         u8 __iomem *addr;
3746         u32 sli_intf;
3747
3748         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3749         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3750                                 SLI_INTF_IF_TYPE_SHIFT;
3751
3752         if (BEx_chip(adapter) && be_physfn(adapter)) {
3753                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3754                 if (adapter->csr == NULL)
3755                         return -ENOMEM;
3756         }
3757
3758         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3759         if (addr == NULL)
3760                 goto pci_map_err;
3761         adapter->db = addr;
3762
3763         be_roce_map_pci_bars(adapter);
3764         return 0;
3765
3766 pci_map_err:
3767         be_unmap_pci_bars(adapter);
3768         return -ENOMEM;
3769 }
3770
3771 static void be_ctrl_cleanup(struct be_adapter *adapter)
3772 {
3773         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3774
3775         be_unmap_pci_bars(adapter);
3776
3777         if (mem->va)
3778                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3779                                   mem->dma);
3780
3781         mem = &adapter->rx_filter;
3782         if (mem->va)
3783                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3784                                   mem->dma);
3785 }
3786
3787 static int be_ctrl_init(struct be_adapter *adapter)
3788 {
3789         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3790         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3791         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3792         u32 sli_intf;
3793         int status;
3794
3795         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3796         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3797                                  SLI_INTF_FAMILY_SHIFT;
3798         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3799
3800         status = be_map_pci_bars(adapter);
3801         if (status)
3802                 goto done;
3803
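        /* The mailbox must be 16-byte aligned: over-allocate by 16 bytes and
         * align the va/dma addresses by hand.
         */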
3804         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3805         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3806                                                 mbox_mem_alloc->size,
3807                                                 &mbox_mem_alloc->dma,
3808                                                 GFP_KERNEL);
3809         if (!mbox_mem_alloc->va) {
3810                 status = -ENOMEM;
3811                 goto unmap_pci_bars;
3812         }
3813         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3814         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3815         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3816         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3817
3818         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3819         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3820                                            &rx_filter->dma,
3821                                            GFP_KERNEL | __GFP_ZERO);
3822         if (rx_filter->va == NULL) {
3823                 status = -ENOMEM;
3824                 goto free_mbox;
3825         }
3826
3827         mutex_init(&adapter->mbox_lock);
3828         spin_lock_init(&adapter->mcc_lock);
3829         spin_lock_init(&adapter->mcc_cq_lock);
3830
3831         init_completion(&adapter->flash_compl);
3832         pci_save_state(adapter->pdev);
3833         return 0;
3834
3835 free_mbox:
3836         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3837                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3838
3839 unmap_pci_bars:
3840         be_unmap_pci_bars(adapter);
3841
3842 done:
3843         return status;
3844 }
3845
3846 static void be_stats_cleanup(struct be_adapter *adapter)
3847 {
3848         struct be_dma_mem *cmd = &adapter->stats_cmd;
3849
3850         if (cmd->va)
3851                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3852                                   cmd->va, cmd->dma);
3853 }
3854
3855 static int be_stats_init(struct be_adapter *adapter)
3856 {
3857         struct be_dma_mem *cmd = &adapter->stats_cmd;
3858
3859         if (lancer_chip(adapter))
3860                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3861         else if (BE2_chip(adapter))
3862                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3863         else
3864                 /* BE3 and Skyhawk */
3865                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3866
3867         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3868                                      GFP_KERNEL | __GFP_ZERO);
3869         if (cmd->va == NULL)
3870                 return -1;
3871         return 0;
3872 }
3873
3874 static void be_remove(struct pci_dev *pdev)
3875 {
3876         struct be_adapter *adapter = pci_get_drvdata(pdev);
3877
3878         if (!adapter)
3879                 return;
3880
3881         be_roce_dev_remove(adapter);
3882         be_intr_set(adapter, false);
3883
3884         cancel_delayed_work_sync(&adapter->func_recovery_work);
3885
3886         unregister_netdev(adapter->netdev);
3887
3888         be_clear(adapter);
3889
3890         /* tell fw we're done with firing cmds */
3891         be_cmd_fw_clean(adapter);
3892
3893         be_stats_cleanup(adapter);
3894
3895         be_ctrl_cleanup(adapter);
3896
3897         pci_disable_pcie_error_reporting(pdev);
3898
3899         pci_set_drvdata(pdev, NULL);
3900         pci_release_regions(pdev);
3901         pci_disable_device(pdev);
3902
3903         free_netdev(adapter->netdev);
3904 }
3905
3906 bool be_is_wol_supported(struct be_adapter *adapter)
3907 {
3908         return (adapter->wol_cap & BE_WOL_CAP) &&
3909                 !be_is_wol_excluded(adapter);
3910 }
3911
3912 u32 be_get_fw_log_level(struct be_adapter *adapter)
3913 {
3914         struct be_dma_mem extfat_cmd;
3915         struct be_fat_conf_params *cfgs;
3916         int status;
3917         u32 level = 0;
3918         int j;
3919
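        /* The ext-FAT capabilities query used below is not applicable to
         * Lancer; the FW log level is reported as 0 there.
         */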
3920         if (lancer_chip(adapter))
3921                 return 0;
3922
3923         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3924         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3925         extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, extfat_cmd.size,
3926                                            &extfat_cmd.dma, GFP_ATOMIC);
3927
3928         if (!extfat_cmd.va) {
3929                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3930                         __func__);
3931                 goto err;
3932         }
3933
3934         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3935         if (!status) {
3936                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3937                                                 sizeof(struct be_cmd_resp_hdr));
3938                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3939                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3940                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3941                 }
3942         }
3943         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3944                           extfat_cmd.dma);
3945 err:
3946         return level;
3947 }
3948
3949 static int be_get_initial_config(struct be_adapter *adapter)
3950 {
3951         int status;
3952         u32 level;
3953
3954         status = be_cmd_get_cntl_attributes(adapter);
3955         if (status)
3956                 return status;
3957
3958         status = be_cmd_get_acpi_wol_cap(adapter);
3959         if (status) {
3960                 /* In case of a failure to get WoL capabilities, check
3961                  * the exclusion list to determine WoL capability */
3962                 if (!be_is_wol_excluded(adapter))
3963                         adapter->wol_cap |= BE_WOL_CAP;
3964         }
3965
3966         if (be_is_wol_supported(adapter))
3967                 adapter->wol = true;
3968
3969         /* Must be a power of 2 or else MODULO will BUG_ON */
3970         adapter->be_get_temp_freq = 64;
3971
3972         level = be_get_fw_log_level(adapter);
3973         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3974
3975         return 0;
3976 }
3977
3978 static int lancer_recover_func(struct be_adapter *adapter)
3979 {
3980         int status;
3981
3982         status = lancer_test_and_set_rdy_state(adapter);
3983         if (status)
3984                 goto err;
3985
3986         if (netif_running(adapter->netdev))
3987                 be_close(adapter->netdev);
3988
3989         be_clear(adapter);
3990
3991         adapter->hw_error = false;
3992         adapter->fw_timeout = false;
3993
3994         status = be_setup(adapter);
3995         if (status)
3996                 goto err;
3997
3998         if (netif_running(adapter->netdev)) {
3999                 status = be_open(adapter->netdev);
4000                 if (status)
4001                         goto err;
4002         }
4003
4004         dev_info(&adapter->pdev->dev,
4005                  "Adapter SLIPORT recovery succeeded\n");
4006         return 0;
4007 err:
4008         if (adapter->eeh_error)
4009                 dev_err(&adapter->pdev->dev,
4010                         "Adapter SLIPORT recovery failed\n");
4011
4012         return status;
4013 }
4014
4015 static void be_func_recovery_task(struct work_struct *work)
4016 {
4017         struct be_adapter *adapter =
4018                 container_of(work, struct be_adapter,  func_recovery_work.work);
4019         int status;
4020
4021         be_detect_error(adapter);
4022
4023         if (adapter->hw_error && lancer_chip(adapter)) {
4025                 if (adapter->eeh_error)
4026                         goto out;
4027
4028                 rtnl_lock();
4029                 netif_device_detach(adapter->netdev);
4030                 rtnl_unlock();
4031
4032                 status = lancer_recover_func(adapter);
4033
4034                 if (!status)
4035                         netif_device_attach(adapter->netdev);
4036         }
4037
4038 out:
4039         schedule_delayed_work(&adapter->func_recovery_work,
4040                               msecs_to_jiffies(1000));
4041 }
4042
4043 static void be_worker(struct work_struct *work)
4044 {
4045         struct be_adapter *adapter =
4046                 container_of(work, struct be_adapter, work.work);
4047         struct be_rx_obj *rxo;
4048         struct be_eq_obj *eqo;
4049         int i;
4050
4051         /* When interrupts are not yet enabled, just reap any pending
4052          * MCC completions */
4053         if (!netif_running(adapter->netdev)) {
4054                 local_bh_disable();
4055                 be_process_mcc(adapter);
4056                 local_bh_enable();
4057                 goto reschedule;
4058         }
4059
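        /* Issue a new stats request only after the previous one completed */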
4060         if (!adapter->stats_cmd_sent) {
4061                 if (lancer_chip(adapter))
4062                         lancer_cmd_get_pport_stats(adapter,
4063                                                 &adapter->stats_cmd);
4064                 else
4065                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4066         }
4067
4068         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4069                 be_cmd_get_die_temperature(adapter);
4070
4071         for_all_rx_queues(adapter, rxo, i) {
4072                 if (rxo->rx_post_starved) {
4073                         rxo->rx_post_starved = false;
4074                         be_post_rx_frags(rxo, GFP_KERNEL);
4075                 }
4076         }
4077
4078         for_all_evt_queues(adapter, eqo, i)
4079                 be_eqd_update(adapter, eqo);
4080
4081 reschedule:
4082         adapter->work_counter++;
4083         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4084 }
4085
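/* A function reset during probe is skipped when VFs are already enabled,
 * since resetting the PF would destroy resources in use by those VFs.
 */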
4086 static bool be_reset_required(struct be_adapter *adapter)
4087 {
4088         return be_find_vfs(adapter, ENABLED) <= 0;
4089 }
4090
4091 static char *mc_name(struct be_adapter *adapter)
4092 {
4093         if (adapter->function_mode & FLEX10_MODE)
4094                 return "FLEX10";
4095         else if (adapter->function_mode & VNIC_MODE)
4096                 return "vNIC";
4097         else if (adapter->function_mode & UMC_ENABLED)
4098                 return "UMC";
4099         else
4100                 return "";
4101 }
4102
4103 static inline char *func_name(struct be_adapter *adapter)
4104 {
4105         return be_physfn(adapter) ? "PF" : "VF";
4106 }
4107
4108 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4109 {
4110         int status = 0;
4111         struct be_adapter *adapter;
4112         struct net_device *netdev;
4113         char port_name;
4114
4115         status = pci_enable_device(pdev);
4116         if (status)
4117                 goto do_none;
4118
4119         status = pci_request_regions(pdev, DRV_NAME);
4120         if (status)
4121                 goto disable_dev;
4122         pci_set_master(pdev);
4123
4124         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4125         if (netdev == NULL) {
4126                 status = -ENOMEM;
4127                 goto rel_reg;
4128         }
4129         adapter = netdev_priv(netdev);
4130         adapter->pdev = pdev;
4131         pci_set_drvdata(pdev, adapter);
4132         adapter->netdev = netdev;
4133         SET_NETDEV_DEV(netdev, &pdev->dev);
4134
4135         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4136         if (!status) {
4137                 netdev->features |= NETIF_F_HIGHDMA;
4138         } else {
4139                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4140                 if (status) {
4141                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4142                         goto free_netdev;
4143                 }
4144         }
4145
4146         status = pci_enable_pcie_error_reporting(pdev);
4147         if (status)
4148                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4149
4150         status = be_ctrl_init(adapter);
4151         if (status)
4152                 goto free_netdev;
4153
4154         /* sync up with fw's ready state */
4155         if (be_physfn(adapter)) {
4156                 status = be_fw_wait_ready(adapter);
4157                 if (status)
4158                         goto ctrl_clean;
4159         }
4160
4161         /* tell fw we're ready to fire cmds */
4162         status = be_cmd_fw_init(adapter);
4163         if (status)
4164                 goto ctrl_clean;
4165
4166         if (be_reset_required(adapter)) {
4167                 status = be_cmd_reset_function(adapter);
4168                 if (status)
4169                         goto ctrl_clean;
4170         }
4171
4172         /* Wait for interrupts to quiesce after an FLR */
4173         msleep(100);
4174
4175         /* Allow interrupts for other ULPs running on NIC function */
4176         be_intr_set(adapter, true);
4177
4178         status = be_stats_init(adapter);
4179         if (status)
4180                 goto ctrl_clean;
4181
4182         status = be_get_initial_config(adapter);
4183         if (status)
4184                 goto stats_clean;
4185
4186         INIT_DELAYED_WORK(&adapter->work, be_worker);
4187         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4188         adapter->rx_fc = adapter->tx_fc = true;
4189
4190         status = be_setup(adapter);
4191         if (status)
4192                 goto stats_clean;
4193
4194         be_netdev_init(netdev);
4195         status = register_netdev(netdev);
4196         if (status != 0)
4197                 goto unsetup;
4198
4199         be_roce_dev_add(adapter);
4200
4201         schedule_delayed_work(&adapter->func_recovery_work,
4202                               msecs_to_jiffies(1000));
4203
4204         be_cmd_query_port_name(adapter, &port_name);
4205
4206         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4207                  func_name(adapter), mc_name(adapter), port_name);
4208
4209         return 0;
4210
4211 unsetup:
4212         be_clear(adapter);
4213 stats_clean:
4214         be_stats_cleanup(adapter);
4215 ctrl_clean:
4216         be_ctrl_cleanup(adapter);
4217 free_netdev:
4218         free_netdev(netdev);
4219         pci_set_drvdata(pdev, NULL);
4220 rel_reg:
4221         pci_release_regions(pdev);
4222 disable_dev:
4223         pci_disable_device(pdev);
4224 do_none:
4225         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4226         return status;
4227 }
4228
4229 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4230 {
4231         struct be_adapter *adapter = pci_get_drvdata(pdev);
4232         struct net_device *netdev = adapter->netdev;
4233
4234         if (adapter->wol)
4235                 be_setup_wol(adapter, true);
4236
4237         cancel_delayed_work_sync(&adapter->func_recovery_work);
4238
4239         netif_device_detach(netdev);
4240         if (netif_running(netdev)) {
4241                 rtnl_lock();
4242                 be_close(netdev);
4243                 rtnl_unlock();
4244         }
4245         be_clear(adapter);
4246
4247         pci_save_state(pdev);
4248         pci_disable_device(pdev);
4249         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4250         return 0;
4251 }
4252
4253 static int be_resume(struct pci_dev *pdev)
4254 {
4255         int status = 0;
4256         struct be_adapter *adapter = pci_get_drvdata(pdev);
4257         struct net_device *netdev = adapter->netdev;
4258
4259         netif_device_detach(netdev);
4260
4261         status = pci_enable_device(pdev);
4262         if (status)
4263                 return status;
4264
4265         pci_set_power_state(pdev, PCI_D0);
4266         pci_restore_state(pdev);
4267
4268         /* tell fw we're ready to fire cmds */
4269         status = be_cmd_fw_init(adapter);
4270         if (status)
4271                 return status;
4272
4273         be_setup(adapter);
4274         if (netif_running(netdev)) {
4275                 rtnl_lock();
4276                 be_open(netdev);
4277                 rtnl_unlock();
4278         }
4279
4280         schedule_delayed_work(&adapter->func_recovery_work,
4281                               msecs_to_jiffies(1000));
4282         netif_device_attach(netdev);
4283
4284         if (adapter->wol)
4285                 be_setup_wol(adapter, false);
4286
4287         return 0;
4288 }
4289
/*
 * An FLR will stop BE from DMAing any data; reset the function on
 * shutdown so no DMA is left in flight once the device is disabled.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

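/* EEH/AER error recovery. The PCI core drives recovery in stages:
 * ->error_detected() is called first so the driver can quiesce itself,
 * ->slot_reset() after the link/slot has been reset, and ->resume()
 * once traffic may flow again. Returning PCI_ERS_RESULT_DISCONNECT
 * at any stage tells the core the device cannot be recovered.
 */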
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_error = true;

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        rtnl_lock();
        netif_device_detach(netdev);
        rtnl_unlock();

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while the flash dump is in progress
         * can prevent it from recovering; wait for the dump to finish.
         * Wait only in the first function, as the wait is needed only
         * once per adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

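/* slot_reset stage: the slot has been reset by the PCI core; bring the
 * function back up far enough to talk to the FW and report whether
 * recovery can proceed.
 */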
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        be_clear_all_error(adapter);

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if the card is ok and the fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        return PCI_ERS_RESULT_RECOVERED;
}

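/* Final recovery stage: re-create adapter resources and restart
 * traffic now that the device is usable again.
 */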
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

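/* Wire the EEH callbacks into the PCI core's error-recovery sequence */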
static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

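/* Only 2048/4096/8192-byte RX fragments are supported; any other
 * value is coerced back to the 2048-byte default before the driver
 * is registered.
 */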
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be "
                        "2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);