/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
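/* Both parameters are S_IRUGO (read-only via sysfs), so they must be given
 * at module load time; a hypothetical invocation:
 *     modprobe be2net num_vfs=4 rx_frag_size=4096
 */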

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

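/* Doorbell helpers: each notify packs the ring id into the low bits of a
 * 32-bit doorbell word and the count being posted/popped into a shifted
 * field, then writes the word to that queue's doorbell offset. The
 * producer-side notifies (RQ, TXQ) issue a wmb() first so the ring-memory
 * updates are visible to the device before the doorbell write makes them
 * consumable.
 */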
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC differs from the
         * active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC was successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 uses the v0 stats cmd, BE3 uses v1 and later chips (SH-R) use v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 uses the v0 stats cmd, BE3 uses v1 and later chips (SH-R) use v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter))  {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

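/* Accumulate a wrapping 16-bit HW counter into a 32-bit software value.
 * The low 16 bits of *acc mirror the HW counter; a new reading smaller
 * than the mirrored value means the counter wrapped, so 65536 is added.
 * Example: *acc = 0x0001fff0 and val = 0x0005 gives wrapped = true and
 * newacc = 0x00010005 + 65536 = 0x00020005, i.e. the counter advanced
 * by 21 across the wrap.
 */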
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* this erx HW counter wraps around after 65535; the
                 * driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is a superset of v0 and v1; use it to access the
                 * v0 and v1 layouts as well
                 */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

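/* Reader side of the u64_stats seqcount: on 32-bit hosts a 64-bit counter
 * read can tear, so the fetch_begin/fetch_retry loop rereads the pair until
 * no writer raced with it. On 64-bit hosts these helpers effectively
 * compile away.
 */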
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
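/* Example: a BE3 skb with linear data plus two page frags needs
 * 1 (linear) + 2 (frags) + 1 (header WRB) = 4 entries; with one frag the
 * count would be 3, odd, so a dummy WRB is appended to keep it even.
 * Lancer has no even-count requirement.
 */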
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

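/* The 802.1Q TCI carries the priority (PCP) in bits 15:13
 * (VLAN_PRIO_SHIFT is 13). For example, a tag of 0x6064 has priority 3;
 * if bit 3 is clear in vlan_prio_bmap, the priority field is rewritten
 * with adapter->recommended_prio below.
 */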
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available
         * bitmap
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

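/* Build the WRB chain for an skb: a header WRB is reserved first, followed
 * by one data WRB per DMA-mapped piece (linear head, then page frags) and
 * an optional dummy WRB. If any mapping fails, the queue head is rewound
 * and every WRB already filled is unmapped before returning 0.
 */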
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

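/* QnQ (QinQ) handling: the inner tag (the OS-supplied tag, or the pvid
 * learned via the async event) is inserted into the packet first, then the
 * outer qnq_vid tag, and *skip_hw_vlan is set so the HW does not add a
 * third tag on top.
 */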
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the f/w
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

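/* BE3 HW can stall when asked to insert a VLAN tag into an IPv6 packet
 * whose first extension header matches the 0xff length pattern checked
 * below; such packets are detected here so the TX path can tag them in
 * software or drop them.
 */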
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* Lancer and SH-R ASICs have a bug wherein packets that are 32 bytes
         * or less may cause a transmit stall on that port. The work-around is
         * to pad such packets (<= 32 bytes) to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto err;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate the CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert the VLAN in the pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts a VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in the driver,
         * and set the event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto err;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
err:
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct the VLAN table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting the VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enabling VLAN Promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                adapter->vlan_tag[vid] = 0;
        }
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_clear_promisc(struct be_adapter *adapter)
{
        adapter->promiscuous = false;
        adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;

        be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}

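/* Program the RX filter to match netdev state: full promiscuous wins,
 * then multicast-promiscuous when the mcast list exceeds HW limits, then
 * the unicast list is rebuilt (slot 0 stays reserved for the primary MAC),
 * and finally the multicast filter is applied, falling back to ALLMULTI
 * if that fails.
 */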
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                be_clear_promisc(adapter);
                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if the number configured exceeds what we
         * support
         */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for the Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting the MULTICAST addresses fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
        } else {
                /* Reset Transparent VLAN Tagging. */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);
        }

        if (!status)
                vf_cfg->vlan_tag = vlan;
        else
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

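/* Per-VF TX rate-limiting. The rate is given in Mbps; the /10 applied when
 * passing it to the FW commands suggests the FW expects units of 10 Mbps,
 * e.g. 1000 Mbps would be sent as 100 (an inference from this code, not a
 * documented ABI).
 */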
static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

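/* Adaptive interrupt coalescing: sample per-queue pkts/sec since the last
 * run and derive a new EQ delay, eqd = (pps / 15000) << 2; values below 8
 * are first snapped to 0, then the result is clamped to [min_eqd, max_eqd].
 * For example, pps = 60000 gives eqd = 16 before clamping. The value
 * programmed into the FW is eqd * 65 / 100 (the delay multiplier computed
 * below).
 */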
1353 static void be_eqd_update(struct be_adapter *adapter)
1354 {
1355         struct be_set_eqd set_eqd[MAX_EVT_QS];
1356         int eqd, i, num = 0, start;
1357         struct be_aic_obj *aic;
1358         struct be_eq_obj *eqo;
1359         struct be_rx_obj *rxo;
1360         struct be_tx_obj *txo;
1361         u64 rx_pkts, tx_pkts;
1362         ulong now;
1363         u32 pps, delta;
1364
1365         for_all_evt_queues(adapter, eqo, i) {
1366                 aic = &adapter->aic_obj[eqo->idx];
1367                 if (!aic->enable) {
1368                         if (aic->jiffies)
1369                                 aic->jiffies = 0;
1370                         eqd = aic->et_eqd;
1371                         goto modify_eqd;
1372                 }
1373
1374                 rxo = &adapter->rx_obj[eqo->idx];
1375                 do {
1376                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1377                         rx_pkts = rxo->stats.rx_pkts;
1378                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1379
1380                 txo = &adapter->tx_obj[eqo->idx];
1381                 do {
1382                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1383                         tx_pkts = txo->stats.tx_reqs;
1384                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1385
1386
1387                 /* Skip if jiffies wrapped around, or on the first calculation */
1388                 now = jiffies;
1389                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1390                     rx_pkts < aic->rx_pkts_prev ||
1391                     tx_pkts < aic->tx_reqs_prev) {
1392                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1393                         continue;
1394                 }
1395
1396                 delta = jiffies_to_msecs(now - aic->jiffies);
1397                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1398                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1399                 eqd = (pps / 15000) << 2;
1400
1401                 if (eqd < 8)
1402                         eqd = 0;
1403                 eqd = min_t(u32, eqd, aic->max_eqd);
1404                 eqd = max_t(u32, eqd, aic->min_eqd);
1405
1406                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1407 modify_eqd:
1408                 if (eqd != aic->prev_eqd) {
1409                         set_eqd[num].delay_multiplier = (eqd * 65) / 100;
1410                         set_eqd[num].eq_id = eqo->q.id;
1411                         aic->prev_eqd = eqd;
1412                         num++;
1413                 }
1414         }
1415
1416         if (num)
1417                 be_cmd_modify_eqd(adapter, set_eqd, num);
1418 }
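/* Illustration only, not part of the driver: the adaptive coalescing step in
 * be_eqd_update() boils down to integer arithmetic on the measured packet
 * rate. A user-space sketch of the pps -> eqd mapping using the same
 * constants as above; the min/max bounds passed in main() are made up.
 */
#if 0
#include <stdio.h>

static unsigned int ex_pps_to_eqd(unsigned int pps,
                                  unsigned int min_eqd, unsigned int max_eqd)
{
        unsigned int eqd = (pps / 15000) << 2;  /* same heuristic as above */

        if (eqd < 8)
                eqd = 0;        /* light traffic: no interrupt delay */
        if (eqd > max_eqd)
                eqd = max_eqd;
        if (eqd < min_eqd)
                eqd = min_eqd;
        return eqd;
}

int main(void)
{
        /* ~300K pkts/s with bounds 0..400 yields an eqd of 80 */
        printf("eqd = %u\n", ex_pps_to_eqd(300000, 0, 400));
        return 0;
}
#endif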
1419
1420 static void be_rx_stats_update(struct be_rx_obj *rxo,
1421                 struct be_rx_compl_info *rxcp)
1422 {
1423         struct be_rx_stats *stats = rx_stats(rxo);
1424
1425         u64_stats_update_begin(&stats->sync);
1426         stats->rx_compl++;
1427         stats->rx_bytes += rxcp->pkt_size;
1428         stats->rx_pkts++;
1429         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1430                 stats->rx_mcast_pkts++;
1431         if (rxcp->err)
1432                 stats->rx_compl_err++;
1433         u64_stats_update_end(&stats->sync);
1434 }
1435
1436 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1437 {
1438         /* The L4 checksum is not reliable for non-TCP/UDP packets.
1439          * Also ignore ipcksm for IPv6 pkts */
1440         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1441                                 (rxcp->ip_csum || rxcp->ipv6);
1442 }
1443
1444 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1445 {
1446         struct be_adapter *adapter = rxo->adapter;
1447         struct be_rx_page_info *rx_page_info;
1448         struct be_queue_info *rxq = &rxo->q;
1449         u16 frag_idx = rxq->tail;
1450
1451         rx_page_info = &rxo->page_info_tbl[frag_idx];
1452         BUG_ON(!rx_page_info->page);
1453
1454         if (rx_page_info->last_page_user) {
1455                 dma_unmap_page(&adapter->pdev->dev,
1456                                dma_unmap_addr(rx_page_info, bus),
1457                                adapter->big_page_size, DMA_FROM_DEVICE);
1458                 rx_page_info->last_page_user = false;
1459         }
1460
1461         queue_tail_inc(rxq);
1462         atomic_dec(&rxq->used);
1463         return rx_page_info;
1464 }
1465
1466 /* Throw away the data in the Rx completion */
1467 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1468                                 struct be_rx_compl_info *rxcp)
1469 {
1470         struct be_rx_page_info *page_info;
1471         u16 i, num_rcvd = rxcp->num_rcvd;
1472
1473         for (i = 0; i < num_rcvd; i++) {
1474                 page_info = get_rx_page_info(rxo);
1475                 put_page(page_info->page);
1476                 memset(page_info, 0, sizeof(*page_info));
1477         }
1478 }
1479
1480 /*
1481  * skb_fill_rx_data forms a complete skb for the Ethernet frame
1482  * indicated by rxcp.
1483  */
1484 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1485                              struct be_rx_compl_info *rxcp)
1486 {
1487         struct be_rx_page_info *page_info;
1488         u16 i, j;
1489         u16 hdr_len, curr_frag_len, remaining;
1490         u8 *start;
1491
1492         page_info = get_rx_page_info(rxo);
1493         start = page_address(page_info->page) + page_info->page_offset;
1494         prefetch(start);
1495
1496         /* Copy data in the first descriptor of this completion */
1497         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1498
1499         skb->len = curr_frag_len;
1500         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1501                 memcpy(skb->data, start, curr_frag_len);
1502                 /* Complete packet has now been moved to data */
1503                 put_page(page_info->page);
1504                 skb->data_len = 0;
1505                 skb->tail += curr_frag_len;
1506         } else {
1507                 hdr_len = ETH_HLEN;
1508                 memcpy(skb->data, start, hdr_len);
1509                 skb_shinfo(skb)->nr_frags = 1;
1510                 skb_frag_set_page(skb, 0, page_info->page);
1511                 skb_shinfo(skb)->frags[0].page_offset =
1512                                         page_info->page_offset + hdr_len;
1513                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1514                 skb->data_len = curr_frag_len - hdr_len;
1515                 skb->truesize += rx_frag_size;
1516                 skb->tail += hdr_len;
1517         }
1518         page_info->page = NULL;
1519
1520         if (rxcp->pkt_size <= rx_frag_size) {
1521                 BUG_ON(rxcp->num_rcvd != 1);
1522                 return;
1523         }
1524
1525         /* More frags present for this completion */
1526         remaining = rxcp->pkt_size - curr_frag_len;
1527         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1528                 page_info = get_rx_page_info(rxo);
1529                 curr_frag_len = min(remaining, rx_frag_size);
1530
1531                 /* Coalesce all frags from the same physical page in one slot */
1532                 if (page_info->page_offset == 0) {
1533                         /* Fresh page */
1534                         j++;
1535                         skb_frag_set_page(skb, j, page_info->page);
1536                         skb_shinfo(skb)->frags[j].page_offset =
1537                                                         page_info->page_offset;
1538                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1539                         skb_shinfo(skb)->nr_frags++;
1540                 } else {
1541                         put_page(page_info->page);
1542                 }
1543
1544                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1545                 skb->len += curr_frag_len;
1546                 skb->data_len += curr_frag_len;
1547                 skb->truesize += rx_frag_size;
1548                 remaining -= curr_frag_len;
1549                 page_info->page = NULL;
1550         }
1551         BUG_ON(j > MAX_SKB_FRAGS);
1552 }
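/* Illustration only, not part of the driver: skb_fill_rx_data() walks as many
 * fragments as the completion reports; the fragment count itself is plain
 * ceiling division of the packet size by the fragment size. A user-space
 * sketch, assuming the default 2048-byte rx_frag_size.
 */
#if 0
#include <stdio.h>

#define EX_RX_FRAG_SIZE 2048    /* default rx_frag_size module parameter */

static unsigned int ex_num_frags(unsigned int pkt_size)
{
        return (pkt_size + EX_RX_FRAG_SIZE - 1) / EX_RX_FRAG_SIZE;
}

int main(void)
{
        printf("1500-byte frame -> %u frag(s)\n", ex_num_frags(1500)); /* 1 */
        printf("9000-byte frame -> %u frag(s)\n", ex_num_frags(9000)); /* 5 */
        return 0;
}
#endif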
1553
1554 /* Process the RX completion indicated by rxcp when GRO is disabled */
1555 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1556                                 struct be_rx_compl_info *rxcp)
1557 {
1558         struct be_adapter *adapter = rxo->adapter;
1559         struct net_device *netdev = adapter->netdev;
1560         struct sk_buff *skb;
1561
1562         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1563         if (unlikely(!skb)) {
1564                 rx_stats(rxo)->rx_drops_no_skbs++;
1565                 be_rx_compl_discard(rxo, rxcp);
1566                 return;
1567         }
1568
1569         skb_fill_rx_data(rxo, skb, rxcp);
1570
1571         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1572                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1573         else
1574                 skb_checksum_none_assert(skb);
1575
1576         skb->protocol = eth_type_trans(skb, netdev);
1577         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1578         if (netdev->features & NETIF_F_RXHASH)
1579                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1580         skb_mark_napi_id(skb, napi);
1581
1582         if (rxcp->vlanf)
1583                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1584
1585         netif_receive_skb(skb);
1586 }
1587
1588 /* Process the RX completion indicated by rxcp when GRO is enabled */
1589 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1590                                     struct napi_struct *napi,
1591                                     struct be_rx_compl_info *rxcp)
1592 {
1593         struct be_adapter *adapter = rxo->adapter;
1594         struct be_rx_page_info *page_info;
1595         struct sk_buff *skb = NULL;
1596         u16 remaining, curr_frag_len;
1597         u16 i, j;
1598
1599         skb = napi_get_frags(napi);
1600         if (!skb) {
1601                 be_rx_compl_discard(rxo, rxcp);
1602                 return;
1603         }
1604
1605         remaining = rxcp->pkt_size;
1606         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1607                 page_info = get_rx_page_info(rxo);
1608
1609                 curr_frag_len = min(remaining, rx_frag_size);
1610
1611                 /* Coalesce all frags from the same physical page in one slot */
1612                 if (i == 0 || page_info->page_offset == 0) {
1613                         /* First frag or Fresh page */
1614                         j++;
1615                         skb_frag_set_page(skb, j, page_info->page);
1616                         skb_shinfo(skb)->frags[j].page_offset =
1617                                                         page_info->page_offset;
1618                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1619                 } else {
1620                         put_page(page_info->page);
1621                 }
1622                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1623                 skb->truesize += rx_frag_size;
1624                 remaining -= curr_frag_len;
1625                 memset(page_info, 0, sizeof(*page_info));
1626         }
1627         BUG_ON(j > MAX_SKB_FRAGS);
1628
1629         skb_shinfo(skb)->nr_frags = j + 1;
1630         skb->len = rxcp->pkt_size;
1631         skb->data_len = rxcp->pkt_size;
1632         skb->ip_summed = CHECKSUM_UNNECESSARY;
1633         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1634         if (adapter->netdev->features & NETIF_F_RXHASH)
1635                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1636         skb_mark_napi_id(skb, napi);
1637
1638         if (rxcp->vlanf)
1639                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1640
1641         napi_gro_frags(napi);
1642 }
1643
1644 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1645                                  struct be_rx_compl_info *rxcp)
1646 {
1647         rxcp->pkt_size =
1648                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1649         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1650         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1651         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1652         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1653         rxcp->ip_csum =
1654                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1655         rxcp->l4_csum =
1656                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1657         rxcp->ipv6 =
1658                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1659         rxcp->num_rcvd =
1660                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1661         rxcp->pkt_type =
1662                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1663         rxcp->rss_hash =
1664                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1665         if (rxcp->vlanf) {
1666                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1667                                           compl);
1668                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1669                                                compl);
1670         }
1671         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1672 }
1673
1674 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1675                                  struct be_rx_compl_info *rxcp)
1676 {
1677         rxcp->pkt_size =
1678                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1679         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1680         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1681         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1682         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1683         rxcp->ip_csum =
1684                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1685         rxcp->l4_csum =
1686                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1687         rxcp->ipv6 =
1688                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1689         rxcp->num_rcvd =
1690                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1691         rxcp->pkt_type =
1692                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1693         rxcp->rss_hash =
1694                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1695         if (rxcp->vlanf) {
1696                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1697                                           compl);
1698                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1699                                                compl);
1700         }
1701         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1702         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1703                                       ip_frag, compl);
1704 }
1705
1706 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1707 {
1708         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1709         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1710         struct be_adapter *adapter = rxo->adapter;
1711
1712         /* For checking the valid bit it is OK to use either definition, as the
1713          * valid bit is at the same position in both v0 and v1 Rx compl */
1714         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1715                 return NULL;
1716
1717         rmb();
1718         be_dws_le_to_cpu(compl, sizeof(*compl));
1719
1720         if (adapter->be3_native)
1721                 be_parse_rx_compl_v1(compl, rxcp);
1722         else
1723                 be_parse_rx_compl_v0(compl, rxcp);
1724
1725         if (rxcp->ip_frag)
1726                 rxcp->l4_csum = 0;
1727
1728         if (rxcp->vlanf) {
1729                 /* vlanf could be wrongly set in some cards.
1730                  * Ignore it if vtm is not set */
1731                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1732                         rxcp->vlanf = 0;
1733
1734                 if (!lancer_chip(adapter))
1735                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1736
1737                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1738                     !adapter->vlan_tag[rxcp->vlan_tag])
1739                         rxcp->vlanf = 0;
1740         }
1741
1742         /* As the compl has been parsed, reset it; we won't touch it again */
1743         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1744
1745         queue_tail_inc(&rxo->cq);
1746         return rxcp;
1747 }
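/* Illustration only, not part of the driver: be_rx_compl_get() follows the
 * usual valid-bit protocol for a DMA'd completion ring: test the valid bit,
 * order the reads (the rmb() above), parse the entry, clear the valid bit so
 * a wrapped-around entry is not mistaken for a fresh one, then advance the
 * tail. A simplified single-threaded user-space sketch of that shape; a real
 * consumer needs the read barrier.
 */
#if 0
#include <stdio.h>

#define EX_RING_LEN 8   /* made-up ring size */

struct ex_compl {
        int valid;      /* stands in for the hardware valid bit */
        int data;
};

static struct ex_compl ex_ring[EX_RING_LEN];
static unsigned int ex_tail;

static struct ex_compl *ex_compl_get(void)
{
        struct ex_compl *c = &ex_ring[ex_tail];

        if (!c->valid)
                return NULL;            /* nothing new from the producer */
        /* a read memory barrier belongs here on real hardware */
        c->valid = 0;                   /* make ring wrap-around detectable */
        ex_tail = (ex_tail + 1) % EX_RING_LEN;
        return c;
}

int main(void)
{
        ex_ring[0].valid = 1;
        ex_ring[0].data = 42;
        printf("got %d\n", ex_compl_get()->data);
        return 0;
}
#endif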
1748
1749 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1750 {
1751         u32 order = get_order(size);
1752
1753         if (order > 0)
1754                 gfp |= __GFP_COMP;
1755         return alloc_pages(gfp, order);
1756 }
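/* Illustration only, not part of the driver: get_order() yields the smallest
 * order such that 2^order pages cover the requested size (__GFP_COMP is set
 * above because multi-page allocations are later reference-counted per
 * fragment). A user-space sketch of the order computation, assuming 4 KiB
 * pages.
 */
#if 0
#include <stdio.h>

#define EX_PAGE_SIZE 4096       /* assumed page size */

static unsigned int ex_get_order(unsigned long size)
{
        unsigned int order = 0;
        unsigned long span = EX_PAGE_SIZE;

        while (span < size) {   /* smallest 2^order * PAGE_SIZE >= size */
                span <<= 1;
                order++;
        }
        return order;
}

int main(void)
{
        printf("order(16384) = %u\n", ex_get_order(16384));     /* 2 */
        return 0;
}
#endif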
1757
1758 /*
1759  * Allocate a page, split it to fragments of size rx_frag_size and post as
1760  * receive buffers to BE
1761  */
1762 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1763 {
1764         struct be_adapter *adapter = rxo->adapter;
1765         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1766         struct be_queue_info *rxq = &rxo->q;
1767         struct page *pagep = NULL;
1768         struct device *dev = &adapter->pdev->dev;
1769         struct be_eth_rx_d *rxd;
1770         u64 page_dmaaddr = 0, frag_dmaaddr;
1771         u32 posted, page_offset = 0;
1772
1773         page_info = &rxo->page_info_tbl[rxq->head];
1774         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1775                 if (!pagep) {
1776                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1777                         if (unlikely(!pagep)) {
1778                                 rx_stats(rxo)->rx_post_fail++;
1779                                 break;
1780                         }
1781                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1782                                                     adapter->big_page_size,
1783                                                     DMA_FROM_DEVICE);
1784                         if (dma_mapping_error(dev, page_dmaaddr)) {
1785                                 put_page(pagep);
1786                                 pagep = NULL;
1787                                 rx_stats(rxo)->rx_post_fail++;
1788                                 break;
1789                         }
1790                         page_info->page_offset = 0;
1791                 } else {
1792                         get_page(pagep);
1793                         page_info->page_offset = page_offset + rx_frag_size;
1794                 }
1795                 page_offset = page_info->page_offset;
1796                 page_info->page = pagep;
1797                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1798                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1799
1800                 rxd = queue_head_node(rxq);
1801                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1802                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1803
1804                 /* Any space left in the current big page for another frag? */
1805                 if ((page_offset + rx_frag_size + rx_frag_size) >
1806                                         adapter->big_page_size) {
1807                         pagep = NULL;
1808                         page_info->last_page_user = true;
1809                 }
1810
1811                 prev_page_info = page_info;
1812                 queue_head_inc(rxq);
1813                 page_info = &rxo->page_info_tbl[rxq->head];
1814         }
1815         if (pagep)
1816                 prev_page_info->last_page_user = true;
1817
1818         if (posted) {
1819                 atomic_add(posted, &rxq->used);
1820                 if (rxo->rx_post_starved)
1821                         rxo->rx_post_starved = false;
1822                 be_rxq_notify(adapter, rxq->id, posted);
1823         } else if (atomic_read(&rxq->used) == 0) {
1824                 /* Let be_worker replenish when memory is available */
1825                 rxo->rx_post_starved = true;
1826         }
1827 }
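/* Illustration only, not part of the driver: be_post_rx_frags() carves each
 * "big page" into rx_frag_size pieces and flags the piece that exhausts the
 * page (last_page_user) so the DMA mapping is torn down exactly once. A
 * user-space sketch of the carving arithmetic, assuming a made-up 16 KiB big
 * page and the default 2048-byte fragment.
 */
#if 0
#include <stdio.h>

#define EX_BIG_PAGE_SIZE        16384
#define EX_RX_FRAG_SIZE         2048

int main(void)
{
        unsigned int offset;

        for (offset = 0; offset + EX_RX_FRAG_SIZE <= EX_BIG_PAGE_SIZE;
             offset += EX_RX_FRAG_SIZE) {
                /* same test as the driver: no room for another frag? */
                int last = (offset + 2 * EX_RX_FRAG_SIZE) > EX_BIG_PAGE_SIZE;

                printf("frag at %5u%s\n", offset, last ? " (last user)" : "");
        }
        return 0;
}
#endif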
1828
1829 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1830 {
1831         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1832
1833         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1834                 return NULL;
1835
1836         rmb();
1837         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1838
1839         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1840
1841         queue_tail_inc(tx_cq);
1842         return txcp;
1843 }
1844
1845 static u16 be_tx_compl_process(struct be_adapter *adapter,
1846                 struct be_tx_obj *txo, u16 last_index)
1847 {
1848         struct be_queue_info *txq = &txo->q;
1849         struct be_eth_wrb *wrb;
1850         struct sk_buff **sent_skbs = txo->sent_skb_list;
1851         struct sk_buff *sent_skb;
1852         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1853         bool unmap_skb_hdr = true;
1854
1855         sent_skb = sent_skbs[txq->tail];
1856         BUG_ON(!sent_skb);
1857         sent_skbs[txq->tail] = NULL;
1858
1859         /* skip header wrb */
1860         queue_tail_inc(txq);
1861
1862         do {
1863                 cur_index = txq->tail;
1864                 wrb = queue_tail_node(txq);
1865                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1866                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1867                 unmap_skb_hdr = false;
1868
1869                 num_wrbs++;
1870                 queue_tail_inc(txq);
1871         } while (cur_index != last_index);
1872
1873         kfree_skb(sent_skb);
1874         return num_wrbs;
1875 }
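/* Illustration only, not part of the driver: the tail walk above, like the
 * index_adv() call in be_tx_compl_clean() below, is modular ring arithmetic.
 * A user-space sketch of advancing an index on a ring, assuming (as the mask
 * trick requires) that the ring length is a power of two.
 */
#if 0
#include <stdio.h>

static void ex_index_adv(unsigned short *index, unsigned int delta,
                         unsigned int ring_len)
{
        *index = (*index + delta) & (ring_len - 1);
}

int main(void)
{
        unsigned short idx = 254;

        ex_index_adv(&idx, 4, 256);     /* 254 + 4 wraps to 2 */
        printf("idx = %d\n", idx);
        return 0;
}
#endif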
1876
1877 /* Return the number of events in the event queue */
1878 static inline int events_get(struct be_eq_obj *eqo)
1879 {
1880         struct be_eq_entry *eqe;
1881         int num = 0;
1882
1883         do {
1884                 eqe = queue_tail_node(&eqo->q);
1885                 if (eqe->evt == 0)
1886                         break;
1887
1888                 rmb();
1889                 eqe->evt = 0;
1890                 num++;
1891                 queue_tail_inc(&eqo->q);
1892         } while (true);
1893
1894         return num;
1895 }
1896
1897 /* Leaves the EQ in disarmed state */
1898 static void be_eq_clean(struct be_eq_obj *eqo)
1899 {
1900         int num = events_get(eqo);
1901
1902         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1903 }
1904
1905 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1906 {
1907         struct be_rx_page_info *page_info;
1908         struct be_queue_info *rxq = &rxo->q;
1909         struct be_queue_info *rx_cq = &rxo->cq;
1910         struct be_rx_compl_info *rxcp;
1911         struct be_adapter *adapter = rxo->adapter;
1912         int flush_wait = 0;
1913
1914         /* Consume pending rx completions.
1915          * Wait for the flush completion (identified by zero num_rcvd)
1916          * to arrive. Notify CQ even when there are no more CQ entries
1917          * for HW to flush partially coalesced CQ entries.
1918          * In Lancer, there is no need to wait for flush compl.
1919          */
1920         for (;;) {
1921                 rxcp = be_rx_compl_get(rxo);
1922                 if (rxcp == NULL) {
1923                         if (lancer_chip(adapter))
1924                                 break;
1925
1926                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1927                                 dev_warn(&adapter->pdev->dev,
1928                                          "did not receive flush compl\n");
1929                                 break;
1930                         }
1931                         be_cq_notify(adapter, rx_cq->id, true, 0);
1932                         mdelay(1);
1933                 } else {
1934                         be_rx_compl_discard(rxo, rxcp);
1935                         be_cq_notify(adapter, rx_cq->id, false, 1);
1936                         if (rxcp->num_rcvd == 0)
1937                                 break;
1938                 }
1939         }
1940
1941         /* After cleanup, leave the CQ in unarmed state */
1942         be_cq_notify(adapter, rx_cq->id, false, 0);
1943
1944         /* Then free posted rx buffers that were not used */
1945         while (atomic_read(&rxq->used) > 0) {
1946                 page_info = get_rx_page_info(rxo);
1947                 put_page(page_info->page);
1948                 memset(page_info, 0, sizeof(*page_info));
1949         }
1950         BUG_ON(atomic_read(&rxq->used));
1951         rxq->tail = rxq->head = 0;
1952 }
1953
1954 static void be_tx_compl_clean(struct be_adapter *adapter)
1955 {
1956         struct be_tx_obj *txo;
1957         struct be_queue_info *txq;
1958         struct be_eth_tx_compl *txcp;
1959         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1960         struct sk_buff *sent_skb;
1961         bool dummy_wrb;
1962         int i, pending_txqs;
1963
1964         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1965         do {
1966                 pending_txqs = adapter->num_tx_qs;
1967
1968                 for_all_tx_queues(adapter, txo, i) {
1969                         txq = &txo->q;
1970                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1971                                 end_idx =
1972                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1973                                                       wrb_index, txcp);
1974                                 num_wrbs += be_tx_compl_process(adapter, txo,
1975                                                                 end_idx);
1976                                 cmpl++;
1977                         }
1978                         if (cmpl) {
1979                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1980                                 atomic_sub(num_wrbs, &txq->used);
1981                                 cmpl = 0;
1982                                 num_wrbs = 0;
1983                         }
1984                         if (atomic_read(&txq->used) == 0)
1985                                 pending_txqs--;
1986                 }
1987
1988                 if (pending_txqs == 0 || ++timeo > 200)
1989                         break;
1990
1991                 mdelay(1);
1992         } while (true);
1993
1994         for_all_tx_queues(adapter, txo, i) {
1995                 txq = &txo->q;
1996                 if (atomic_read(&txq->used))
1997                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1998                                 atomic_read(&txq->used));
1999
2000                 /* free posted tx for which compls will never arrive */
2001                 while (atomic_read(&txq->used)) {
2002                         sent_skb = txo->sent_skb_list[txq->tail];
2003                         end_idx = txq->tail;
2004                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2005                                                    &dummy_wrb);
2006                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2007                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2008                         atomic_sub(num_wrbs, &txq->used);
2009                 }
2010         }
2011 }
2012
2013 static void be_evt_queues_destroy(struct be_adapter *adapter)
2014 {
2015         struct be_eq_obj *eqo;
2016         int i;
2017
2018         for_all_evt_queues(adapter, eqo, i) {
2019                 if (eqo->q.created) {
2020                         be_eq_clean(eqo);
2021                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2022                         napi_hash_del(&eqo->napi);
2023                         netif_napi_del(&eqo->napi);
2024                 }
2025                 be_queue_free(adapter, &eqo->q);
2026         }
2027 }
2028
2029 static int be_evt_queues_create(struct be_adapter *adapter)
2030 {
2031         struct be_queue_info *eq;
2032         struct be_eq_obj *eqo;
2033         struct be_aic_obj *aic;
2034         int i, rc;
2035
2036         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2037                                     adapter->cfg_num_qs);
2038
2039         for_all_evt_queues(adapter, eqo, i) {
2040                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2041                                BE_NAPI_WEIGHT);
2042                 napi_hash_add(&eqo->napi);
2043                 aic = &adapter->aic_obj[i];
2044                 eqo->adapter = adapter;
2045                 eqo->tx_budget = BE_TX_BUDGET;
2046                 eqo->idx = i;
2047                 aic->max_eqd = BE_MAX_EQD;
2048                 aic->enable = true;
2049
2050                 eq = &eqo->q;
2051                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2052                                         sizeof(struct be_eq_entry));
2053                 if (rc)
2054                         return rc;
2055
2056                 rc = be_cmd_eq_create(adapter, eqo);
2057                 if (rc)
2058                         return rc;
2059         }
2060         return 0;
2061 }
2062
2063 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2064 {
2065         struct be_queue_info *q;
2066
2067         q = &adapter->mcc_obj.q;
2068         if (q->created)
2069                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2070         be_queue_free(adapter, q);
2071
2072         q = &adapter->mcc_obj.cq;
2073         if (q->created)
2074                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2075         be_queue_free(adapter, q);
2076 }
2077
2078 /* Must be called only after TX qs are created as MCC shares TX EQ */
2079 static int be_mcc_queues_create(struct be_adapter *adapter)
2080 {
2081         struct be_queue_info *q, *cq;
2082
2083         cq = &adapter->mcc_obj.cq;
2084         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2085                         sizeof(struct be_mcc_compl)))
2086                 goto err;
2087
2088         /* Use the default EQ for MCC completions */
2089         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2090                 goto mcc_cq_free;
2091
2092         q = &adapter->mcc_obj.q;
2093         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2094                 goto mcc_cq_destroy;
2095
2096         if (be_cmd_mccq_create(adapter, q, cq))
2097                 goto mcc_q_free;
2098
2099         return 0;
2100
2101 mcc_q_free:
2102         be_queue_free(adapter, q);
2103 mcc_cq_destroy:
2104         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2105 mcc_cq_free:
2106         be_queue_free(adapter, cq);
2107 err:
2108         return -1;
2109 }
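/* Illustration only, not part of the driver: be_mcc_queues_create() uses the
 * kernel's standard goto-based unwind ladder, where each failure jumps to a
 * label that releases exactly what was already acquired, in reverse order.
 * The same shape in user space with made-up resources.
 */
#if 0
#include <stdlib.h>

static int ex_create(void)
{
        char *a, *b;

        a = malloc(64);
        if (!a)
                goto err;
        b = malloc(64);
        if (!b)
                goto free_a;            /* undo only what succeeded */

        /* resources would be handed off here; the sketch just releases
         * them and reports success
         */
        free(b);
        free(a);
        return 0;

free_a:
        free(a);
err:
        return -1;
}

int main(void)
{
        return ex_create();
}
#endif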
2110
2111 static void be_tx_queues_destroy(struct be_adapter *adapter)
2112 {
2113         struct be_queue_info *q;
2114         struct be_tx_obj *txo;
2115         u8 i;
2116
2117         for_all_tx_queues(adapter, txo, i) {
2118                 q = &txo->q;
2119                 if (q->created)
2120                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2121                 be_queue_free(adapter, q);
2122
2123                 q = &txo->cq;
2124                 if (q->created)
2125                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2126                 be_queue_free(adapter, q);
2127         }
2128 }
2129
2130 static int be_tx_qs_create(struct be_adapter *adapter)
2131 {
2132         struct be_queue_info *cq, *eq;
2133         struct be_tx_obj *txo;
2134         int status, i;
2135
2136         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2137
2138         for_all_tx_queues(adapter, txo, i) {
2139                 cq = &txo->cq;
2140                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2141                                         sizeof(struct be_eth_tx_compl));
2142                 if (status)
2143                         return status;
2144
2145                 u64_stats_init(&txo->stats.sync);
2146                 u64_stats_init(&txo->stats.sync_compl);
2147
2148                 /* If num_evt_qs is less than num_tx_qs, then more than
2149                  * one txq shares an eq
2150                  */
2151                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2152                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2153                 if (status)
2154                         return status;
2155
2156                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2157                                         sizeof(struct be_eth_wrb));
2158                 if (status)
2159                         return status;
2160
2161                 status = be_cmd_txq_create(adapter, txo);
2162                 if (status)
2163                         return status;
2164         }
2165
2166         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2167                  adapter->num_tx_qs);
2168         return 0;
2169 }
2170
2171 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2172 {
2173         struct be_queue_info *q;
2174         struct be_rx_obj *rxo;
2175         int i;
2176
2177         for_all_rx_queues(adapter, rxo, i) {
2178                 q = &rxo->cq;
2179                 if (q->created)
2180                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2181                 be_queue_free(adapter, q);
2182         }
2183 }
2184
2185 static int be_rx_cqs_create(struct be_adapter *adapter)
2186 {
2187         struct be_queue_info *eq, *cq;
2188         struct be_rx_obj *rxo;
2189         int rc, i;
2190
2191         /* We can create as many RSS rings as there are EQs. */
2192         adapter->num_rx_qs = adapter->num_evt_qs;
2193
2194         /* We'll use RSS only if at least 2 RSS rings are supported.
2195          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2196          */
2197         if (adapter->num_rx_qs > 1)
2198                 adapter->num_rx_qs++;
2199
2200         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2201         for_all_rx_queues(adapter, rxo, i) {
2202                 rxo->adapter = adapter;
2203                 cq = &rxo->cq;
2204                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2205                                 sizeof(struct be_eth_rx_compl));
2206                 if (rc)
2207                         return rc;
2208
2209                 u64_stats_init(&rxo->stats.sync);
2210                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2211                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2212                 if (rc)
2213                         return rc;
2214         }
2215
2216         dev_info(&adapter->pdev->dev,
2217                  "created %d RSS queue(s) and 1 default RX queue\n",
2218                  adapter->num_rx_qs - 1);
2219         return 0;
2220 }
2221
2222 static irqreturn_t be_intx(int irq, void *dev)
2223 {
2224         struct be_eq_obj *eqo = dev;
2225         struct be_adapter *adapter = eqo->adapter;
2226         int num_evts = 0;
2227
2228         /* IRQ is not expected when NAPI is scheduled as the EQ
2229          * will not be armed.
2230          * But, this can happen on Lancer INTx where it takes
2231          * a while to de-assert INTx, or in BE2 where occasionally
2232          * an interrupt may be raised even when EQ is unarmed.
2233          * If NAPI is already scheduled, then counting & notifying
2234          * events will orphan them.
2235          */
2236         if (napi_schedule_prep(&eqo->napi)) {
2237                 num_evts = events_get(eqo);
2238                 __napi_schedule(&eqo->napi);
2239                 if (num_evts)
2240                         eqo->spurious_intr = 0;
2241         }
2242         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2243
2244         /* Return IRQ_HANDLED only for the first spurious intr
2245          * after a valid intr to stop the kernel from branding
2246          * this irq as a bad one!
2247          */
2248         if (num_evts || eqo->spurious_intr++ == 0)
2249                 return IRQ_HANDLED;
2250         else
2251                 return IRQ_NONE;
2252 }
2253
2254 static irqreturn_t be_msix(int irq, void *dev)
2255 {
2256         struct be_eq_obj *eqo = dev;
2257
2258         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2259         napi_schedule(&eqo->napi);
2260         return IRQ_HANDLED;
2261 }
2262
2263 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2264 {
2265         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2266 }
2267
2268 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2269                         int budget, int polling)
2270 {
2271         struct be_adapter *adapter = rxo->adapter;
2272         struct be_queue_info *rx_cq = &rxo->cq;
2273         struct be_rx_compl_info *rxcp;
2274         u32 work_done;
2275
2276         for (work_done = 0; work_done < budget; work_done++) {
2277                 rxcp = be_rx_compl_get(rxo);
2278                 if (!rxcp)
2279                         break;
2280
2281                 /* Is it a flush compl that has no data? */
2282                 if (unlikely(rxcp->num_rcvd == 0))
2283                         goto loop_continue;
2284
2285                 /* Discard compl with partial DMA Lancer B0 */
2286                 if (unlikely(!rxcp->pkt_size)) {
2287                         be_rx_compl_discard(rxo, rxcp);
2288                         goto loop_continue;
2289                 }
2290
2291                 /* On BE, drop pkts that arrive due to imperfect filtering in
2292                  * promiscuous mode on some SKUs
2293                  */
2294                 if (unlikely(rxcp->port != adapter->port_num &&
2295                                 !lancer_chip(adapter))) {
2296                         be_rx_compl_discard(rxo, rxcp);
2297                         goto loop_continue;
2298                 }
2299
2300                 /* Don't do gro when we're busy_polling */
2301                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2302                         be_rx_compl_process_gro(rxo, napi, rxcp);
2303                 else
2304                         be_rx_compl_process(rxo, napi, rxcp);
2305
2306 loop_continue:
2307                 be_rx_stats_update(rxo, rxcp);
2308         }
2309
2310         if (work_done) {
2311                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2312
2313                 /* When an rx-obj gets into post_starved state, just
2314                  * let be_worker do the posting.
2315                  */
2316                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2317                     !rxo->rx_post_starved)
2318                         be_post_rx_frags(rxo, GFP_ATOMIC);
2319         }
2320
2321         return work_done;
2322 }
2323
2324 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2325                           int budget, int idx)
2326 {
2327         struct be_eth_tx_compl *txcp;
2328         int num_wrbs = 0, work_done;
2329
2330         for (work_done = 0; work_done < budget; work_done++) {
2331                 txcp = be_tx_compl_get(&txo->cq);
2332                 if (!txcp)
2333                         break;
2334                 num_wrbs += be_tx_compl_process(adapter, txo,
2335                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2336                                         wrb_index, txcp));
2337         }
2338
2339         if (work_done) {
2340                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2341                 atomic_sub(num_wrbs, &txo->q.used);
2342
2343                 /* As Tx wrbs have been freed up, wake up netdev queue
2344                  * if it was stopped due to lack of tx wrbs.  */
2345                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2346                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2347                         netif_wake_subqueue(adapter->netdev, idx);
2348                 }
2349
2350                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2351                 tx_stats(txo)->tx_compl += work_done;
2352                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2353         }
2354         return (work_done < budget); /* Done */
2355 }
2356
2357 int be_poll(struct napi_struct *napi, int budget)
2358 {
2359         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2360         struct be_adapter *adapter = eqo->adapter;
2361         int max_work = 0, work, i, num_evts;
2362         struct be_rx_obj *rxo;
2363         bool tx_done;
2364
2365         num_evts = events_get(eqo);
2366
2367         /* Process all TXQs serviced by this EQ */
2368         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2369                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2370                                         eqo->tx_budget, i);
2371                 if (!tx_done)
2372                         max_work = budget;
2373         }
2374
2375         if (be_lock_napi(eqo)) {
2376                 /* This loop will iterate twice for EQ0 in which
2377                  * completions of the last RXQ (default one) are also processed
2378                  * For other EQs the loop iterates only once
2379                  */
2380                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2381                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2382                         max_work = max(work, max_work);
2383                 }
2384                 be_unlock_napi(eqo);
2385         } else {
2386                 max_work = budget;
2387         }
2388
2389         if (is_mcc_eqo(eqo))
2390                 be_process_mcc(adapter);
2391
2392         if (max_work < budget) {
2393                 napi_complete(napi);
2394                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2395         } else {
2396                 /* As we'll continue in polling mode, count and clear events */
2397                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2398         }
2399         return max_work;
2400 }
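/* Illustration only, not part of the driver: the NAPI contract followed by
 * be_poll() is "return the amount of work done; if it is below the budget,
 * complete NAPI and re-arm the event source, otherwise stay in polling
 * mode". A schematic user-space rendering of that control flow with a
 * stubbed-out packet processor.
 */
#if 0
#include <stdio.h>

static int ex_process_rx(int budget)    /* stub: pretend 3 packets arrived */
{
        return budget < 3 ? budget : 3;
}

static int ex_poll(int budget)
{
        int work = ex_process_rx(budget);

        if (work < budget)
                printf("complete, re-arm interrupts\n");
        else
                printf("budget exhausted, keep polling\n");
        return work;
}

int main(void)
{
        ex_poll(64);    /* light load: completes and re-arms */
        ex_poll(2);     /* heavy load: stays in polling mode */
        return 0;
}
#endif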
2401
2402 #ifdef CONFIG_NET_RX_BUSY_POLL
2403 static int be_busy_poll(struct napi_struct *napi)
2404 {
2405         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2406         struct be_adapter *adapter = eqo->adapter;
2407         struct be_rx_obj *rxo;
2408         int i, work = 0;
2409
2410         if (!be_lock_busy_poll(eqo))
2411                 return LL_FLUSH_BUSY;
2412
2413         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2414                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2415                 if (work)
2416                         break;
2417         }
2418
2419         be_unlock_busy_poll(eqo);
2420         return work;
2421 }
2422 #endif
2423
2424 void be_detect_error(struct be_adapter *adapter)
2425 {
2426         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2427         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2428         u32 i;
2429
2430         if (be_hw_error(adapter))
2431                 return;
2432
2433         if (lancer_chip(adapter)) {
2434                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2435                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2436                         sliport_err1 = ioread32(adapter->db +
2437                                         SLIPORT_ERROR1_OFFSET);
2438                         sliport_err2 = ioread32(adapter->db +
2439                                         SLIPORT_ERROR2_OFFSET);
2440                 }
2441         } else {
2442                 pci_read_config_dword(adapter->pdev,
2443                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2444                 pci_read_config_dword(adapter->pdev,
2445                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2446                 pci_read_config_dword(adapter->pdev,
2447                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2448                 pci_read_config_dword(adapter->pdev,
2449                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2450
2451                 ue_lo = (ue_lo & ~ue_lo_mask);
2452                 ue_hi = (ue_hi & ~ue_hi_mask);
2453         }
2454
2455         /* On certain platforms BE hardware can indicate spurious UEs.
2456          * Allow the h/w to stop working completely in case of a real UE.
2457          * Hence not setting the hw_error for UE detection.
2458          */
2459         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2460                 adapter->hw_error = true;
2461                 /* Do not log error messages if it's a FW reset */
2462                 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2463                     sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2464                         dev_info(&adapter->pdev->dev,
2465                                  "Firmware update in progress\n");
2466                         return;
2467                 } else {
2468                         dev_err(&adapter->pdev->dev,
2469                                 "Error detected in the card\n");
2470                 }
2471         }
2472
2473         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2474                 dev_err(&adapter->pdev->dev,
2475                         "ERR: sliport status 0x%x\n", sliport_status);
2476                 dev_err(&adapter->pdev->dev,
2477                         "ERR: sliport error1 0x%x\n", sliport_err1);
2478                 dev_err(&adapter->pdev->dev,
2479                         "ERR: sliport error2 0x%x\n", sliport_err2);
2480         }
2481
2482         if (ue_lo) {
2483                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2484                         if (ue_lo & 1)
2485                                 dev_err(&adapter->pdev->dev,
2486                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2487                 }
2488         }
2489
2490         if (ue_hi) {
2491                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2492                         if (ue_hi & 1)
2493                                 dev_err(&adapter->pdev->dev,
2494                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2495                 }
2496         }
2497
2498 }
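/* Illustration only, not part of the driver: the UE reporting above walks a
 * status word one bit at a time, shifting right so the loop ends as soon as
 * no set bits remain. The same idiom in user space with made-up bit names.
 */
#if 0
#include <stdio.h>

static const char * const ex_bit_names[] = { "ERR0", "ERR1", "ERR2", "ERR3" };

int main(void)
{
        unsigned int status = 0x5;      /* bits 0 and 2 set */
        unsigned int i;

        for (i = 0; status; status >>= 1, i++)
                if (status & 1)
                        printf("%s bit set\n", ex_bit_names[i]);
        return 0;
}
#endif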
2499
2500 static void be_msix_disable(struct be_adapter *adapter)
2501 {
2502         if (msix_enabled(adapter)) {
2503                 pci_disable_msix(adapter->pdev);
2504                 adapter->num_msix_vec = 0;
2505                 adapter->num_msix_roce_vec = 0;
2506         }
2507 }
2508
2509 static int be_msix_enable(struct be_adapter *adapter)
2510 {
2511         int i, status, num_vec;
2512         struct device *dev = &adapter->pdev->dev;
2513
2514         /* If RoCE is supported, program the max number of NIC vectors that
2515          * may be configured via set-channels, along with vectors needed for
2516          * RoCE. Else, just program the number we'll use initially.
2517          */
2518         if (be_roce_supported(adapter))
2519                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2520                                 2 * num_online_cpus());
2521         else
2522                 num_vec = adapter->cfg_num_qs;
2523
2524         for (i = 0; i < num_vec; i++)
2525                 adapter->msix_entries[i].entry = i;
2526
2527         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2528         if (status == 0) {
2529                 goto done;
2530         } else if (status >= MIN_MSIX_VECTORS) {
2531                 num_vec = status;
2532                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2533                                          num_vec);
2534                 if (!status)
2535                         goto done;
2536         }
2537
2538         dev_warn(dev, "MSIx enable failed\n");
2539
2540         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2541         if (!be_physfn(adapter))
2542                 return status;
2543         return 0;
2544 done:
2545         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2546                 adapter->num_msix_roce_vec = num_vec / 2;
2547                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2548                          adapter->num_msix_roce_vec);
2549         }
2550
2551         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2552
2553         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2554                  adapter->num_msix_vec);
2555         return 0;
2556 }
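/* Illustration only, not part of the driver: pci_enable_msix(), as used
 * above, returns 0 on success and, when the full request cannot be granted,
 * a positive count of vectors that are available, so the driver retries once
 * with the smaller count. A schematic sketch of that retry shape with a
 * stubbed allocator.
 */
#if 0
#include <stdio.h>

#define EX_AVAILABLE 4  /* pretend the platform can grant 4 vectors */

static int ex_enable_msix(int requested)
{
        return requested <= EX_AVAILABLE ? 0 : EX_AVAILABLE;
}

int main(void)
{
        int num_vec = 16, status;

        status = ex_enable_msix(num_vec);
        if (status > 0) {               /* partial failure: retry smaller */
                num_vec = status;
                status = ex_enable_msix(num_vec);
        }
        printf("status %d with %d vector(s)\n", status, num_vec);
        return 0;
}
#endif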
2557
2558 static inline int be_msix_vec_get(struct be_adapter *adapter,
2559                                 struct be_eq_obj *eqo)
2560 {
2561         return adapter->msix_entries[eqo->msix_idx].vector;
2562 }
2563
2564 static int be_msix_register(struct be_adapter *adapter)
2565 {
2566         struct net_device *netdev = adapter->netdev;
2567         struct be_eq_obj *eqo;
2568         int status, i, vec;
2569
2570         for_all_evt_queues(adapter, eqo, i) {
2571                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2572                 vec = be_msix_vec_get(adapter, eqo);
2573                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2574                 if (status)
2575                         goto err_msix;
2576         }
2577
2578         return 0;
2579 err_msix:
2580         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2581                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2582         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2583                 status);
2584         be_msix_disable(adapter);
2585         return status;
2586 }
2587
2588 static int be_irq_register(struct be_adapter *adapter)
2589 {
2590         struct net_device *netdev = adapter->netdev;
2591         int status;
2592
2593         if (msix_enabled(adapter)) {
2594                 status = be_msix_register(adapter);
2595                 if (status == 0)
2596                         goto done;
2597                 /* INTx is not supported for VF */
2598                 if (!be_physfn(adapter))
2599                         return status;
2600         }
2601
2602         /* INTx: only the first EQ is used */
2603         netdev->irq = adapter->pdev->irq;
2604         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2605                              &adapter->eq_obj[0]);
2606         if (status) {
2607                 dev_err(&adapter->pdev->dev,
2608                         "INTx request IRQ failed - err %d\n", status);
2609                 return status;
2610         }
2611 done:
2612         adapter->isr_registered = true;
2613         return 0;
2614 }
2615
2616 static void be_irq_unregister(struct be_adapter *adapter)
2617 {
2618         struct net_device *netdev = adapter->netdev;
2619         struct be_eq_obj *eqo;
2620         int i;
2621
2622         if (!adapter->isr_registered)
2623                 return;
2624
2625         /* INTx */
2626         if (!msix_enabled(adapter)) {
2627                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2628                 goto done;
2629         }
2630
2631         /* MSIx */
2632         for_all_evt_queues(adapter, eqo, i)
2633                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2634
2635 done:
2636         adapter->isr_registered = false;
2637 }
2638
2639 static void be_rx_qs_destroy(struct be_adapter *adapter)
2640 {
2641         struct be_queue_info *q;
2642         struct be_rx_obj *rxo;
2643         int i;
2644
2645         for_all_rx_queues(adapter, rxo, i) {
2646                 q = &rxo->q;
2647                 if (q->created) {
2648                         be_cmd_rxq_destroy(adapter, q);
2649                         be_rx_cq_clean(rxo);
2650                 }
2651                 be_queue_free(adapter, q);
2652         }
2653 }
2654
2655 static int be_close(struct net_device *netdev)
2656 {
2657         struct be_adapter *adapter = netdev_priv(netdev);
2658         struct be_eq_obj *eqo;
2659         int i;
2660
2661         be_roce_dev_close(adapter);
2662
2663         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2664                 for_all_evt_queues(adapter, eqo, i) {
2665                         napi_disable(&eqo->napi);
2666                         be_disable_busy_poll(eqo);
2667                 }
2668                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2669         }
2670
2671         be_async_mcc_disable(adapter);
2672
2673         /* Wait for all pending tx completions to arrive so that
2674          * all tx skbs are freed.
2675          */
2676         netif_tx_disable(netdev);
2677         be_tx_compl_clean(adapter);
2678
2679         be_rx_qs_destroy(adapter);
2680
2681         for (i = 1; i < (adapter->uc_macs + 1); i++)
2682                 be_cmd_pmac_del(adapter, adapter->if_handle,
2683                                 adapter->pmac_id[i], 0);
2684         adapter->uc_macs = 0;
2685
2686         for_all_evt_queues(adapter, eqo, i) {
2687                 if (msix_enabled(adapter))
2688                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2689                 else
2690                         synchronize_irq(netdev->irq);
2691                 be_eq_clean(eqo);
2692         }
2693
2694         be_irq_unregister(adapter);
2695
2696         return 0;
2697 }
2698
2699 static int be_rx_qs_create(struct be_adapter *adapter)
2700 {
2701         struct be_rx_obj *rxo;
2702         int rc, i, j;
2703         u8 rsstable[128];
2704
2705         for_all_rx_queues(adapter, rxo, i) {
2706                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2707                                     sizeof(struct be_eth_rx_d));
2708                 if (rc)
2709                         return rc;
2710         }
2711
2712         /* The FW would like the default RXQ to be created first */
2713         rxo = default_rxo(adapter);
2714         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2715                                adapter->if_handle, false, &rxo->rss_id);
2716         if (rc)
2717                 return rc;
2718
2719         for_all_rss_queues(adapter, rxo, i) {
2720                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2721                                        rx_frag_size, adapter->if_handle,
2722                                        true, &rxo->rss_id);
2723                 if (rc)
2724                         return rc;
2725         }
2726
2727         if (be_multi_rxq(adapter)) {
2728                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2729                         for_all_rss_queues(adapter, rxo, i) {
2730                                 if ((j + i) >= 128)
2731                                         break;
2732                                 rsstable[j + i] = rxo->rss_id;
2733                         }
2734                 }
2735                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2736                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2737
2738                 if (!BEx_chip(adapter))
2739                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2740                                                 RSS_ENABLE_UDP_IPV6;
2741         } else {
2742                 /* Disable RSS, if only default RX Q is created */
2743                 adapter->rss_flags = RSS_ENABLE_NONE;
2744         }
2745
2746         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2747                                128);
2748         if (rc) {
2749                 adapter->rss_flags = RSS_ENABLE_NONE;
2750                 return rc;
2751         }
2752
2753         /* First time posting */
2754         for_all_rx_queues(adapter, rxo, i)
2755                 be_post_rx_frags(rxo, GFP_KERNEL);
2756         return 0;
2757 }
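/* Illustration only, not part of the driver: the nested loops above stripe
 * the RSS queue ids round-robin across the 128-entry indirection table, so
 * every hash bucket maps to one of the RSS rings. An equivalent user-space
 * sketch, assuming 4 RSS queues.
 */
#if 0
#include <stdio.h>

#define EX_TABLE_LEN    128
#define EX_NUM_RSS_QS   4

int main(void)
{
        unsigned char table[EX_TABLE_LEN];
        int i;

        for (i = 0; i < EX_TABLE_LEN; i++)
                table[i] = i % EX_NUM_RSS_QS;   /* ring for hash bucket i */

        printf("bucket 0 -> q%d, bucket 5 -> q%d\n", table[0], table[5]);
        return 0;
}
#endif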
2758
2759 static int be_open(struct net_device *netdev)
2760 {
2761         struct be_adapter *adapter = netdev_priv(netdev);
2762         struct be_eq_obj *eqo;
2763         struct be_rx_obj *rxo;
2764         struct be_tx_obj *txo;
2765         u8 link_status;
2766         int status, i;
2767
2768         status = be_rx_qs_create(adapter);
2769         if (status)
2770                 goto err;
2771
2772         status = be_irq_register(adapter);
2773         if (status)
2774                 goto err;
2775
2776         for_all_rx_queues(adapter, rxo, i)
2777                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2778
2779         for_all_tx_queues(adapter, txo, i)
2780                 be_cq_notify(adapter, txo->cq.id, true, 0);
2781
2782         be_async_mcc_enable(adapter);
2783
2784         for_all_evt_queues(adapter, eqo, i) {
2785                 napi_enable(&eqo->napi);
2786                 be_enable_busy_poll(eqo);
2787                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2788         }
2789         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2790
2791         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2792         if (!status)
2793                 be_link_status_update(adapter, link_status);
2794
2795         netif_tx_start_all_queues(netdev);
2796         be_roce_dev_open(adapter);
2797         return 0;
2798 err:
2799         be_close(adapter->netdev);
2800         return -EIO;
2801 }
2802
2803 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2804 {
2805         struct be_dma_mem cmd;
2806         int status = 0;
2807         u8 mac[ETH_ALEN];
2808
2809         memset(mac, 0, ETH_ALEN);
2810
2811         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2812         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2813                                      GFP_KERNEL);
2814         if (cmd.va == NULL)
2815                 return -1;
2816
2817         if (enable) {
2818                 status = pci_write_config_dword(adapter->pdev,
2819                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2820                 if (status) {
2821                         dev_err(&adapter->pdev->dev,
2822                                 "Could not enable Wake-on-LAN\n");
2823                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2824                                           cmd.dma);
2825                         return status;
2826                 }
2827                 status = be_cmd_enable_magic_wol(adapter,
2828                                 adapter->netdev->dev_addr, &cmd);
2829                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2830                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2831         } else {
2832                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2833                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2834                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2835         }
2836
2837         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2838         return status;
2839 }
2840
2841 /*
2842  * Generate a seed MAC address from the PF MAC Address using jhash.
2843  * MAC addresses for the VFs are assigned incrementally, starting from the seed.
2844  * These addresses are programmed in the ASIC by the PF and the VF driver
2845  * queries for the MAC address during its probe.
2846  */
2847 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2848 {
2849         u32 vf;
2850         int status = 0;
2851         u8 mac[ETH_ALEN];
2852         struct be_vf_cfg *vf_cfg;
2853
2854         be_vf_eth_addr_generate(adapter, mac);
2855
2856         for_all_vfs(adapter, vf_cfg, vf) {
2857                 if (BEx_chip(adapter))
2858                         status = be_cmd_pmac_add(adapter, mac,
2859                                                  vf_cfg->if_handle,
2860                                                  &vf_cfg->pmac_id, vf + 1);
2861                 else
2862                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2863                                                 vf + 1);
2864
2865                 if (status)
2866                         dev_err(&adapter->pdev->dev,
2867                                 "MAC address assignment failed for VF %d\n", vf);
2868                 else
2869                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2870
2871                 mac[5] += 1;
2872         }
2873         return status;
2874 }
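
/* A minimal sketch of deriving such a seed with jhash(), using a
 * hypothetical helper (the driver's real be_vf_eth_addr_generate() is
 * defined elsewhere): hash the PF MAC into the low three octets and keep
 * a fixed, locally-administered unicast prefix; the caller then bumps
 * mac[5] once per VF, as in the loop above.
 */
static inline void example_vf_seed_mac(const u8 *pf_mac, u8 *seed)
{
        u32 hash = jhash(pf_mac, ETH_ALEN, 0);

        seed[0] = 0x02;         /* locally administered, unicast */
        seed[1] = 0x00;
        seed[2] = 0x00;
        seed[3] = (hash >> 16) & 0xff;
        seed[4] = (hash >> 8) & 0xff;
        seed[5] = hash & 0xff;
}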
2875
2876 static int be_vfs_mac_query(struct be_adapter *adapter)
2877 {
2878         int status, vf;
2879         u8 mac[ETH_ALEN];
2880         struct be_vf_cfg *vf_cfg;
2881
2882         for_all_vfs(adapter, vf_cfg, vf) {
2883                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2884                                                mac, vf_cfg->if_handle,
2885                                                false, vf + 1);
2886                 if (status)
2887                         return status;
2888                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2889         }
2890         return 0;
2891 }
2892
2893 static void be_vf_clear(struct be_adapter *adapter)
2894 {
2895         struct be_vf_cfg *vf_cfg;
2896         u32 vf;
2897
2898         if (pci_vfs_assigned(adapter->pdev)) {
2899                 dev_warn(&adapter->pdev->dev,
2900                          "VFs are assigned to VMs: not disabling VFs\n");
2901                 goto done;
2902         }
2903
2904         pci_disable_sriov(adapter->pdev);
2905
2906         for_all_vfs(adapter, vf_cfg, vf) {
2907                 if (BEx_chip(adapter))
2908                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2909                                         vf_cfg->pmac_id, vf + 1);
2910                 else
2911                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2912                                        vf + 1);
2913
2914                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2915         }
2916 done:
2917         kfree(adapter->vf_cfg);
2918         adapter->num_vfs = 0;
2919 }
2920
2921 static void be_clear_queues(struct be_adapter *adapter)
2922 {
2923         be_mcc_queues_destroy(adapter);
2924         be_rx_cqs_destroy(adapter);
2925         be_tx_queues_destroy(adapter);
2926         be_evt_queues_destroy(adapter);
2927 }
2928
2929 static void be_cancel_worker(struct be_adapter *adapter)
2930 {
2931         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2932                 cancel_delayed_work_sync(&adapter->work);
2933                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2934         }
2935 }
2936
2937 static void be_mac_clear(struct be_adapter *adapter)
2938 {
2939         int i;
2940
2941         if (adapter->pmac_id) {
2942                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2943                         be_cmd_pmac_del(adapter, adapter->if_handle,
2944                                         adapter->pmac_id[i], 0);
2945                 adapter->uc_macs = 0;
2946
2947                 kfree(adapter->pmac_id);
2948                 adapter->pmac_id = NULL;
2949         }
2950 }
2951
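/* Teardown happens in roughly the reverse order of be_setup(): stop the
 * worker first, then release VF state, the MAC/uc-mac list, the interface,
 * the queues and finally MSI-X.
 */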
2952 static int be_clear(struct be_adapter *adapter)
2953 {
2954         be_cancel_worker(adapter);
2955
2956         if (sriov_enabled(adapter))
2957                 be_vf_clear(adapter);
2958
2959         /* delete the primary mac along with the uc-mac list */
2960         be_mac_clear(adapter);
2961
2962         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2963
2964         be_clear_queues(adapter);
2965
2966         be_msix_disable(adapter);
2967         return 0;
2968 }
2969
2970 static int be_vfs_if_create(struct be_adapter *adapter)
2971 {
2972         struct be_resources res = {0};
2973         struct be_vf_cfg *vf_cfg;
2974         u32 cap_flags, en_flags, vf;
2975         int status = 0;
2976
2977         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2978                     BE_IF_FLAGS_MULTICAST;
2979
2980         for_all_vfs(adapter, vf_cfg, vf) {
2981                 if (!BE3_chip(adapter)) {
2982                         status = be_cmd_get_profile_config(adapter, &res,
2983                                                            vf + 1);
2984                         if (!status)
2985                                 cap_flags = res.if_cap_flags;
2986                 }
2987
2988                 /* If a FW profile exists, then cap_flags are updated */
2989                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2990                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2991                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2992                                           &vf_cfg->if_handle, vf + 1);
2993                 if (status)
2994                         goto err;
2995         }
2996 err:
2997         return status;
2998 }
2999
3000 static int be_vf_setup_init(struct be_adapter *adapter)
3001 {
3002         struct be_vf_cfg *vf_cfg;
3003         int vf;
3004
3005         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3006                                   GFP_KERNEL);
3007         if (!adapter->vf_cfg)
3008                 return -ENOMEM;
3009
3010         for_all_vfs(adapter, vf_cfg, vf) {
3011                 vf_cfg->if_handle = -1;
3012                 vf_cfg->pmac_id = -1;
3013         }
3014         return 0;
3015 }
3016
3017 static int be_vf_setup(struct be_adapter *adapter)
3018 {
3019         struct device *dev = &adapter->pdev->dev;
3020         struct be_vf_cfg *vf_cfg;
3021         int status, old_vfs, vf;
3022         u32 privileges;
3023         u16 lnk_speed;
3024
3025         old_vfs = pci_num_vf(adapter->pdev);
3026         if (old_vfs) {
3027                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3028                 if (old_vfs != num_vfs)
3029                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3030                 adapter->num_vfs = old_vfs;
3031         } else {
3032                 if (num_vfs > be_max_vfs(adapter))
3033                         dev_info(dev, "Device supports %d VFs and not %d\n",
3034                                  be_max_vfs(adapter), num_vfs);
3035                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3036                 if (!adapter->num_vfs)
3037                         return 0;
3038         }
3039
3040         status = be_vf_setup_init(adapter);
3041         if (status)
3042                 goto err;
3043
3044         if (old_vfs) {
3045                 for_all_vfs(adapter, vf_cfg, vf) {
3046                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3047                         if (status)
3048                                 goto err;
3049                 }
3050         } else {
3051                 status = be_vfs_if_create(adapter);
3052                 if (status)
3053                         goto err;
3054         }
3055
3056         if (old_vfs) {
3057                 status = be_vfs_mac_query(adapter);
3058                 if (status)
3059                         goto err;
3060         } else {
3061                 status = be_vf_eth_addr_config(adapter);
3062                 if (status)
3063                         goto err;
3064         }
3065
3066         for_all_vfs(adapter, vf_cfg, vf) {
3067                 /* Allow VFs to program MAC/VLAN filters */
3068                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3069                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3070                         status = be_cmd_set_fn_privileges(adapter,
3071                                                           privileges |
3072                                                           BE_PRIV_FILTMGMT,
3073                                                           vf + 1);
3074                         if (!status)
3075                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3076                                          vf);
3077                 }
3078
3079                 /* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
3080                  * Allow the full available bandwidth.
3081                  */
3082                 if (BE3_chip(adapter) && !old_vfs)
3083                         be_cmd_set_qos(adapter, 1000, vf + 1);
3084
3085                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3086                                                   NULL, vf + 1);
3087                 if (!status)
3088                         vf_cfg->tx_rate = lnk_speed;
3089
3090                 if (!old_vfs)
3091                         be_cmd_enable_vf(adapter, vf + 1);
3092         }
3093
3094         if (!old_vfs) {
3095                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3096                 if (status) {
3097                         dev_err(dev, "SRIOV enable failed\n");
3098                         adapter->num_vfs = 0;
3099                         goto err;
3100                 }
3101         }
3102         return 0;
3103 err:
3104         dev_err(dev, "VF setup failed\n");
3105         be_vf_clear(adapter);
3106         return status;
3107 }
3108
3109 /* On BE2/BE3, the FW does not advertise the supported resource limits */
3110 static void BEx_get_resources(struct be_adapter *adapter,
3111                               struct be_resources *res)
3112 {
3113         struct pci_dev *pdev = adapter->pdev;
3114         bool use_sriov = false;
3115         int max_vfs;
3116
3117         max_vfs = pci_sriov_get_totalvfs(pdev);
3118
3119         if (BE3_chip(adapter) && sriov_want(adapter)) {
3120                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3121                 use_sriov = res->max_vfs;
3122         }
3123
3124         if (be_physfn(adapter))
3125                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3126         else
3127                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3128
3129         if (adapter->function_mode & FLEX10_MODE)
3130                 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3131         else if (adapter->function_mode & UMC_ENABLED)
3132                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3133         else
3134                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3135         res->max_mcast_mac = BE_MAX_MC;
3136
3137         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3138         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3139             !be_physfn(adapter) || (adapter->port_num > 1))
3140                 res->max_tx_qs = 1;
3141         else
3142                 res->max_tx_qs = BE3_MAX_TX_QS;
3143
3144         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3145             !use_sriov && be_physfn(adapter))
3146                 res->max_rss_qs = (adapter->be3_native) ?
3147                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3148         res->max_rx_qs = res->max_rss_qs + 1;
3149
3150         if (be_physfn(adapter))
3151                 res->max_evt_qs = (max_vfs > 0) ?
3152                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3153         else
3154                 res->max_evt_qs = 1;
3155
3156         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3157         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3158                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3159 }
3160
3161 static void be_setup_init(struct be_adapter *adapter)
3162 {
3163         adapter->vlan_prio_bmap = 0xff;
3164         adapter->phy.link_speed = -1;
3165         adapter->if_handle = -1;
3166         adapter->be3_native = false;
3167         adapter->promiscuous = false;
3168         if (be_physfn(adapter))
3169                 adapter->cmd_privileges = MAX_PRIVILEGES;
3170         else
3171                 adapter->cmd_privileges = MIN_PRIVILEGES;
3172 }
3173
3174 static int be_get_resources(struct be_adapter *adapter)
3175 {
3176         struct device *dev = &adapter->pdev->dev;
3177         struct be_resources res = {0};
3178         int status;
3179
3180         if (BEx_chip(adapter)) {
3181                 BEx_get_resources(adapter, &res);
3182                 adapter->res = res;
3183         }
3184
3185         /* For Lancer, SH etc., read per-function resource limits from the FW.
3186          * GET_FUNC_CONFIG returns per-function guaranteed limits.
3187          * GET_PROFILE_CONFIG returns the PCI-E related (PF-pool) limits.
3188          */
3189         if (!BEx_chip(adapter)) {
3190                 status = be_cmd_get_func_config(adapter, &res);
3191                 if (status)
3192                         return status;
3193
3194                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3195                 if (be_roce_supported(adapter))
3196                         res.max_evt_qs /= 2;
3197                 adapter->res = res;
3198
3199                 if (be_physfn(adapter)) {
3200                         status = be_cmd_get_profile_config(adapter, &res, 0);
3201                         if (status)
3202                                 return status;
3203                         adapter->res.max_vfs = res.max_vfs;
3204                 }
3205
3206                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3207                          be_max_txqs(adapter), be_max_rxqs(adapter),
3208                          be_max_rss(adapter), be_max_eqs(adapter),
3209                          be_max_vfs(adapter));
3210                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3211                          be_max_uc(adapter), be_max_mc(adapter),
3212                          be_max_vlans(adapter));
3213         }
3214
3215         return 0;
3216 }
3217
3218 /* Routine to query per function resource limits */
3219 static int be_get_config(struct be_adapter *adapter)
3220 {
3221         u16 profile_id;
3222         int status;
3223
3224         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3225                                      &adapter->function_mode,
3226                                      &adapter->function_caps,
3227                                      &adapter->asic_rev);
3228         if (status)
3229                 return status;
3230
3231         if (be_physfn(adapter)) {
3232                 status = be_cmd_get_active_profile(adapter, &profile_id);
3233                 if (!status)
3234                         dev_info(&adapter->pdev->dev,
3235                                  "Using profile 0x%x\n", profile_id);
3236         }
3237
3238         status = be_get_resources(adapter);
3239         if (status)
3240                 return status;
3241
3242         /* the primary MAC needs 1 pmac entry */
3243         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3244                                    GFP_KERNEL);
3245         if (!adapter->pmac_id)
3246                 return -ENOMEM;
3247
3248         /* Sanitize cfg_num_qs based on HW and platform limits */
3249         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3250
3251         return 0;
3252 }
3253
3254 static int be_mac_setup(struct be_adapter *adapter)
3255 {
3256         u8 mac[ETH_ALEN];
3257         int status;
3258
3259         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3260                 status = be_cmd_get_perm_mac(adapter, mac);
3261                 if (status)
3262                         return status;
3263
3264                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3265                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3266         } else {
3267                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3268                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3269         }
3270
3271         /* For BE3-R VFs, the PF programs the initial MAC address */
3272         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3273                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3274                                 &adapter->pmac_id[0], 0);
3275         return 0;
3276 }
3277
3278 static void be_schedule_worker(struct be_adapter *adapter)
3279 {
3280         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3281         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3282 }
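
/* be_schedule_worker()/be_cancel_worker() form a guarded pair: the
 * BE_FLAGS_WORKER_SCHEDULED flag records that the delayed work is pending,
 * so teardown paths cancel it at most once.  A minimal sketch of the same
 * pattern in isolation (hypothetical names):
 */
struct example_ctx {
        struct delayed_work work;
        u32 flags;
#define EXAMPLE_WORK_SCHEDULED          BIT(0)
};

static void example_worker_start(struct example_ctx *ctx)
{
        schedule_delayed_work(&ctx->work, msecs_to_jiffies(1000));
        ctx->flags |= EXAMPLE_WORK_SCHEDULED;
}

static void example_worker_stop(struct example_ctx *ctx)
{
        if (ctx->flags & EXAMPLE_WORK_SCHEDULED) {
                cancel_delayed_work_sync(&ctx->work);
                ctx->flags &= ~EXAMPLE_WORK_SCHEDULED;
        }
}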
3283
3284 static int be_setup_queues(struct be_adapter *adapter)
3285 {
3286         struct net_device *netdev = adapter->netdev;
3287         int status;
3288
3289         status = be_evt_queues_create(adapter);
3290         if (status)
3291                 goto err;
3292
3293         status = be_tx_qs_create(adapter);
3294         if (status)
3295                 goto err;
3296
3297         status = be_rx_cqs_create(adapter);
3298         if (status)
3299                 goto err;
3300
3301         status = be_mcc_queues_create(adapter);
3302         if (status)
3303                 goto err;
3304
3305         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3306         if (status)
3307                 goto err;
3308
3309         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3310         if (status)
3311                 goto err;
3312
3313         return 0;
3314 err:
3315         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3316         return status;
3317 }
3318
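/* Re-create all queues after a resource reconfiguration: quiesce traffic,
 * tear the queues down, re-program MSI-X unless the vectors are shared
 * with RoCE, then bring everything back up.
 */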
3319 int be_update_queues(struct be_adapter *adapter)
3320 {
3321         struct net_device *netdev = adapter->netdev;
3322         int status;
3323
3324         if (netif_running(netdev))
3325                 be_close(netdev);
3326
3327         be_cancel_worker(adapter);
3328
3329         /* If any vectors have been shared with RoCE, we cannot re-program
3330          * the MSI-X table.
3331          */
3332         if (!adapter->num_msix_roce_vec)
3333                 be_msix_disable(adapter);
3334
3335         be_clear_queues(adapter);
3336
3337         if (!msix_enabled(adapter)) {
3338                 status = be_msix_enable(adapter);
3339                 if (status)
3340                         return status;
3341         }
3342
3343         status = be_setup_queues(adapter);
3344         if (status)
3345                 return status;
3346
3347         be_schedule_worker(adapter);
3348
3349         if (netif_running(netdev))
3350                 status = be_open(netdev);
3351
3352         return status;
3353 }
3354
3355 static int be_setup(struct be_adapter *adapter)
3356 {
3357         struct device *dev = &adapter->pdev->dev;
3358         u32 tx_fc, rx_fc, en_flags;
3359         int status;
3360
3361         be_setup_init(adapter);
3362
3363         if (!lancer_chip(adapter))
3364                 be_cmd_req_native_mode(adapter);
3365
3366         status = be_get_config(adapter);
3367         if (status)
3368                 goto err;
3369
3370         status = be_msix_enable(adapter);
3371         if (status)
3372                 goto err;
3373
3374         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3375                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3376         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3377                 en_flags |= BE_IF_FLAGS_RSS;
3378         en_flags = en_flags & be_if_cap_flags(adapter);
3379         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3380                                   &adapter->if_handle, 0);
3381         if (status)
3382                 goto err;
3383
3384         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3385         rtnl_lock();
3386         status = be_setup_queues(adapter);
3387         rtnl_unlock();
3388         if (status)
3389                 goto err;
3390
3391         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3392
3393         status = be_mac_setup(adapter);
3394         if (status)
3395                 goto err;
3396
3397         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3398
3399         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3400                 dev_err(dev, "Firmware on card is old (%s), IRQs may not work.\n",
3401                         adapter->fw_ver);
3402                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3403         }
3404
3405         if (adapter->vlans_added)
3406                 be_vid_config(adapter);
3407
3408         be_set_rx_mode(adapter->netdev);
3409
3410         be_cmd_get_acpi_wol_cap(adapter);
3411
3412         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3413
3414         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3415                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3416                                         adapter->rx_fc);
3417
3418         if (sriov_want(adapter)) {
3419                 if (be_max_vfs(adapter))
3420                         be_vf_setup(adapter);
3421                 else
3422                         dev_warn(dev, "device doesn't support SRIOV\n");
3423         }
3424
3425         status = be_cmd_get_phy_info(adapter);
3426         if (!status && be_pause_supported(adapter))
3427                 adapter->phy.fc_autoneg = 1;
3428
3429         be_schedule_worker(adapter);
3430         return 0;
3431 err:
3432         be_clear(adapter);
3433         return status;
3434 }
3435
3436 #ifdef CONFIG_NET_POLL_CONTROLLER
3437 static void be_netpoll(struct net_device *netdev)
3438 {
3439         struct be_adapter *adapter = netdev_priv(netdev);
3440         struct be_eq_obj *eqo;
3441         int i;
3442
3443         for_all_evt_queues(adapter, eqo, i) {
3444                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3445                 napi_schedule(&eqo->napi);
3446         }
3449 }
3450 #endif
3451
3452 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3453 static char flash_cookie[2][16] =      {"*** SE FLAS", "H DIRECTORY *** "};
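/* The two 16-byte halves above are contiguous in memory, so get_fsec_info()
 * below compares all 32 bytes (including the NUL padding of the first half)
 * against the cookie field of a candidate flash section header in a single
 * memcmp().
 */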
3454
3455 static bool be_flash_redboot(struct be_adapter *adapter,
3456                         const u8 *p, u32 img_start, int image_size,
3457                         int hdr_size)
3458 {
3459         u32 crc_offset;
3460         u8 flashed_crc[4];
3461         int status;
3462
3463         crc_offset = hdr_size + img_start + image_size - 4;
3464
3465         p += crc_offset;
3466
3467         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3468                         (image_size - 4));
3469         if (status) {
3470                 dev_err(&adapter->pdev->dev,
3471                         "could not get crc from flash, not flashing redboot\n");
3472                 return false;
3473         }
3474
3475         /* update redboot only if crc does not match */
3476         if (!memcmp(flashed_crc, p, 4))
3477                 return false;
3478         else
3479                 return true;
3480 }
3481
3482 static bool phy_flashing_required(struct be_adapter *adapter)
3483 {
3484         return (adapter->phy.phy_type == TN_8022 &&
3485                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3486 }
3487
3488 static bool is_comp_in_ufi(struct be_adapter *adapter,
3489                            struct flash_section_info *fsec, int type)
3490 {
3491         int i = 0, img_type = 0;
3492         struct flash_section_info_g2 *fsec_g2 = NULL;
3493
3494         if (BE2_chip(adapter))
3495                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3496
3497         for (i = 0; i < MAX_FLASH_COMP; i++) {
3498                 if (fsec_g2)
3499                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3500                 else
3501                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3502
3503                 if (img_type == type)
3504                         return true;
3505         }
3506         return false;
3508 }
3509
3510 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3511                                          int header_size,
3512                                          const struct firmware *fw)
3513 {
3514         struct flash_section_info *fsec = NULL;
3515         const u8 *p = fw->data;
3516
3517         p += header_size;
3518         while (p < (fw->data + fw->size)) {
3519                 fsec = (struct flash_section_info *)p;
3520                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3521                         return fsec;
3522                 p += 32;
3523         }
3524         return NULL;
3525 }
3526
3527 static int be_flash(struct be_adapter *adapter, const u8 *img,
3528                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3529 {
3530         u32 total_bytes = 0, flash_op, num_bytes = 0;
3531         int status = 0;
3532         struct be_cmd_write_flashrom *req = flash_cmd->va;
3533
3534         total_bytes = img_size;
3535         while (total_bytes) {
3536                 num_bytes = min_t(u32, 32*1024, total_bytes);
3537
3538                 total_bytes -= num_bytes;
3539
3540                 if (!total_bytes) {
3541                         if (optype == OPTYPE_PHY_FW)
3542                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3543                         else
3544                                 flash_op = FLASHROM_OPER_FLASH;
3545                 } else {
3546                         if (optype == OPTYPE_PHY_FW)
3547                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3548                         else
3549                                 flash_op = FLASHROM_OPER_SAVE;
3550                 }
3551
3552                 memcpy(req->data_buf, img, num_bytes);
3553                 img += num_bytes;
3554                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3555                                                 flash_op, num_bytes);
3556                 if (status) {
3557                         if (status == ILLEGAL_IOCTL_REQ &&
3558                             optype == OPTYPE_PHY_FW)
3559                                 break;
3560                         dev_err(&adapter->pdev->dev,
3561                                 "cmd to write to flash rom failed.\n");
3562                         return status;
3563                 }
3564         }
3565         return 0;
3566 }
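
/* The download above is staged: every 32KB chunk except the last is sent
 * with a "save" opcode and only the final chunk carries the "flash" opcode
 * that commits the section.  A minimal sketch of that opcode selection
 * (hypothetical helper, not driver code):
 */
static inline u32 example_flash_opcode(u32 bytes_left_after_chunk,
                                       bool phy_fw)
{
        if (!bytes_left_after_chunk)    /* final chunk: commit */
                return phy_fw ? FLASHROM_OPER_PHY_FLASH : FLASHROM_OPER_FLASH;
        return phy_fw ? FLASHROM_OPER_PHY_SAVE : FLASHROM_OPER_SAVE;
}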
3567
3568 /* For BE2, BE3 and BE3-R */
3569 static int be_flash_BEx(struct be_adapter *adapter,
3570                          const struct firmware *fw,
3571                          struct be_dma_mem *flash_cmd,
3572                          int num_of_images)
3574 {
3575         int status = 0, i, filehdr_size = 0;
3576         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3577         const u8 *p = fw->data;
3578         const struct flash_comp *pflashcomp;
3579         int num_comp, redboot;
3580         struct flash_section_info *fsec = NULL;
3581
3582         struct flash_comp gen3_flash_types[] = {
3583                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3584                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3585                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3586                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3587                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3588                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3589                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3590                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3591                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3592                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3593                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3594                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3595                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3596                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3597                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3598                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3599                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3600                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3601                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3602                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3603         };
3604
3605         struct flash_comp gen2_flash_types[] = {
3606                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3607                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3608                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3609                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3610                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3611                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3612                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3613                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3614                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3615                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3616                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3617                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3618                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3619                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3620                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3621                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3622         };
3623
3624         if (BE3_chip(adapter)) {
3625                 pflashcomp = gen3_flash_types;
3626                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3627                 num_comp = ARRAY_SIZE(gen3_flash_types);
3628         } else {
3629                 pflashcomp = gen2_flash_types;
3630                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3631                 num_comp = ARRAY_SIZE(gen2_flash_types);
3632         }
3633
3634         /* Get flash section info*/
3635         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3636         if (!fsec) {
3637                 dev_err(&adapter->pdev->dev,
3638                         "Invalid Cookie. UFI corrupted?\n");
3639                 return -1;
3640         }
3641         for (i = 0; i < num_comp; i++) {
3642                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3643                         continue;
3644
3645                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3646                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3647                         continue;
3648
3649                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3650                     !phy_flashing_required(adapter))
3651                         continue;
3652
3653                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3654                         redboot = be_flash_redboot(adapter, fw->data,
3655                                 pflashcomp[i].offset, pflashcomp[i].size,
3656                                 filehdr_size + img_hdrs_size);
3657                         if (!redboot)
3658                                 continue;
3659                 }
3660
3661                 p = fw->data;
3662                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3663                 if (p + pflashcomp[i].size > fw->data + fw->size)
3664                         return -1;
3665
3666                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3667                                         pflashcomp[i].size);
3668                 if (status) {
3669                         dev_err(&adapter->pdev->dev,
3670                                 "Flashing section type %d failed.\n",
3671                                 pflashcomp[i].img_type);
3672                         return status;
3673                 }
3674         }
3675         return 0;
3676 }
3677
3678 static int be_flash_skyhawk(struct be_adapter *adapter,
3679                 const struct firmware *fw,
3680                 struct be_dma_mem *flash_cmd, int num_of_images)
3681 {
3682         int status = 0, i, filehdr_size = 0;
3683         int img_offset, img_size, img_optype, redboot;
3684         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3685         const u8 *p = fw->data;
3686         struct flash_section_info *fsec = NULL;
3687
3688         filehdr_size = sizeof(struct flash_file_hdr_g3);
3689         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3690         if (!fsec) {
3691                 dev_err(&adapter->pdev->dev,
3692                         "Invalid Cookie. UFI corrupted?\n");
3693                 return -1;
3694         }
3695
3696         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3697                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3698                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3699
3700                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3701                 case IMAGE_FIRMWARE_iSCSI:
3702                         img_optype = OPTYPE_ISCSI_ACTIVE;
3703                         break;
3704                 case IMAGE_BOOT_CODE:
3705                         img_optype = OPTYPE_REDBOOT;
3706                         break;
3707                 case IMAGE_OPTION_ROM_ISCSI:
3708                         img_optype = OPTYPE_BIOS;
3709                         break;
3710                 case IMAGE_OPTION_ROM_PXE:
3711                         img_optype = OPTYPE_PXE_BIOS;
3712                         break;
3713                 case IMAGE_OPTION_ROM_FCoE:
3714                         img_optype = OPTYPE_FCOE_BIOS;
3715                         break;
3716                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3717                         img_optype = OPTYPE_ISCSI_BACKUP;
3718                         break;
3719                 case IMAGE_NCSI:
3720                         img_optype = OPTYPE_NCSI_FW;
3721                         break;
3722                 default:
3723                         continue;
3724                 }
3725
3726                 if (img_optype == OPTYPE_REDBOOT) {
3727                         redboot = be_flash_redboot(adapter, fw->data,
3728                                         img_offset, img_size,
3729                                         filehdr_size + img_hdrs_size);
3730                         if (!redboot)
3731                                 continue;
3732                 }
3733
3734                 p = fw->data;
3735                 p += filehdr_size + img_offset + img_hdrs_size;
3736                 if (p + img_size > fw->data + fw->size)
3737                         return -1;
3738
3739                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3740                 if (status) {
3741                         dev_err(&adapter->pdev->dev,
3742                                 "Flashing section type %d failed.\n",
3743                                 le32_to_cpu(fsec->fsec_entry[i].type));
3744                         return status;
3745                 }
3746         }
3747         return 0;
3748 }
3749
3750 static int lancer_fw_download(struct be_adapter *adapter,
3751                                 const struct firmware *fw)
3752 {
3753 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3754 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3755         struct be_dma_mem flash_cmd;
3756         const u8 *data_ptr = NULL;
3757         u8 *dest_image_ptr = NULL;
3758         size_t image_size = 0;
3759         u32 chunk_size = 0;
3760         u32 data_written = 0;
3761         u32 offset = 0;
3762         int status = 0;
3763         u8 add_status = 0;
3764         u8 change_status;
3765
3766         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3767                 dev_err(&adapter->pdev->dev,
3768                         "FW Image not properly aligned. "
3769                         "Length must be 4-byte aligned.\n");
3770                 status = -EINVAL;
3771                 goto lancer_fw_exit;
3772         }
3773
3774         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3775                                 + LANCER_FW_DOWNLOAD_CHUNK;
3776         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3777                                           &flash_cmd.dma, GFP_KERNEL);
3778         if (!flash_cmd.va) {
3779                 status = -ENOMEM;
3780                 goto lancer_fw_exit;
3781         }
3782
3783         dest_image_ptr = flash_cmd.va +
3784                                 sizeof(struct lancer_cmd_req_write_object);
3785         image_size = fw->size;
3786         data_ptr = fw->data;
3787
3788         while (image_size) {
3789                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3790
3791                 /* Copy the image chunk content. */
3792                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3793
3794                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3795                                                  chunk_size, offset,
3796                                                  LANCER_FW_DOWNLOAD_LOCATION,
3797                                                  &data_written, &change_status,
3798                                                  &add_status);
3799                 if (status)
3800                         break;
3801
3802                 offset += data_written;
3803                 data_ptr += data_written;
3804                 image_size -= data_written;
3805         }
3806
3807         if (!status) {
3808                 /* Commit the FW written */
3809                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3810                                                  0, offset,
3811                                                  LANCER_FW_DOWNLOAD_LOCATION,
3812                                                  &data_written, &change_status,
3813                                                  &add_status);
3814         }
3815
3816         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3817                                 flash_cmd.dma);
3818         if (status) {
3819                 dev_err(&adapter->pdev->dev,
3820                         "Firmware load error. "
3821                         "Status code: 0x%x Additional Status: 0x%x\n",
3822                         status, add_status);
3823                 goto lancer_fw_exit;
3824         }
3825
3826         if (change_status == LANCER_FW_RESET_NEEDED) {
3827                 dev_info(&adapter->pdev->dev,
3828                          "Resetting adapter to activate new FW\n");
3829                 status = lancer_physdev_ctrl(adapter,
3830                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3831                 if (status) {
3832                         dev_err(&adapter->pdev->dev,
3833                                 "Adapter busy for FW reset.\n"
3834                                 "New FW will not be active.\n");
3835                         goto lancer_fw_exit;
3836                 }
3837         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3838                 dev_err(&adapter->pdev->dev,
3839                         "System reboot required for new FW"
3840                         " to be active\n");
3841         }
3842
3843         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3844 lancer_fw_exit:
3845         return status;
3846 }
3847
3848 #define UFI_TYPE2               2
3849 #define UFI_TYPE3               3
3850 #define UFI_TYPE3R              10
3851 #define UFI_TYPE4               4
3852 static int be_get_ufi_type(struct be_adapter *adapter,
3853                            struct flash_file_hdr_g3 *fhdr)
3854 {
3855         if (fhdr == NULL)
3856                 goto be_get_ufi_exit;
3857
3858         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3859                 return UFI_TYPE4;
3860         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3861                 if (fhdr->asic_type_rev == 0x10)
3862                         return UFI_TYPE3R;
3863                 else
3864                         return UFI_TYPE3;
3865         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3866                 return UFI_TYPE2;
3867
3868 be_get_ufi_exit:
3869         dev_err(&adapter->pdev->dev,
3870                 "UFI and Interface are not compatible for flashing\n");
3871         return -1;
3872 }
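
/* The mapping encoded above, in table form:
 *
 *      fhdr->build[0]   chip      asic_type_rev   result
 *      '4'              Skyhawk   -               UFI_TYPE4
 *      '3'              BE3       0x10            UFI_TYPE3R
 *      '3'              BE3       other           UFI_TYPE3
 *      '2'              BE2       -               UFI_TYPE2
 *      anything else                              -1 (incompatible)
 */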
3873
3874 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3875 {
3876         struct flash_file_hdr_g3 *fhdr3;
3877         struct image_hdr *img_hdr_ptr = NULL;
3878         struct be_dma_mem flash_cmd;
3879         const u8 *p;
3880         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3881
3882         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3883         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3884                                           &flash_cmd.dma, GFP_KERNEL);
3885         if (!flash_cmd.va) {
3886                 status = -ENOMEM;
3887                 goto be_fw_exit;
3888         }
3889
3890         p = fw->data;
3891         fhdr3 = (struct flash_file_hdr_g3 *)p;
3892
3893         ufi_type = be_get_ufi_type(adapter, fhdr3);
3894
3895         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3896         for (i = 0; i < num_imgs; i++) {
3897                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3898                                 (sizeof(struct flash_file_hdr_g3) +
3899                                  i * sizeof(struct image_hdr)));
3900                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3901                         switch (ufi_type) {
3902                         case UFI_TYPE4:
3903                                 status = be_flash_skyhawk(adapter, fw,
3904                                                         &flash_cmd, num_imgs);
3905                                 break;
3906                         case UFI_TYPE3R:
3907                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3908                                                       num_imgs);
3909                                 break;
3910                         case UFI_TYPE3:
3911                                 /* Do not flash this ufi on BE3-R cards */
3912                                 if (adapter->asic_rev < 0x10)
3913                                         status = be_flash_BEx(adapter, fw,
3914                                                               &flash_cmd,
3915                                                               num_imgs);
3916                                 else {
3917                                         status = -1;
3918                                         dev_err(&adapter->pdev->dev,
3919                                                 "Can't load BE3 UFI on BE3R\n");
3920                                 }
3921                         }
3922                 }
3923         }
3924
3925         if (ufi_type == UFI_TYPE2)
3926                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3927         else if (ufi_type == -1)
3928                 status = -1;
3929
3930         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3931                           flash_cmd.dma);
3932         if (status) {
3933                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3934                 goto be_fw_exit;
3935         }
3936
3937         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3938
3939 be_fw_exit:
3940         return status;
3941 }
3942
3943 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3944 {
3945         const struct firmware *fw;
3946         int status;
3947
3948         if (!netif_running(adapter->netdev)) {
3949                 dev_err(&adapter->pdev->dev,
3950                         "Firmware load not allowed (interface is down)\n");
3951                 return -1;
3952         }
3953
3954         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3955         if (status)
3956                 goto fw_exit;
3957
3958         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3959
3960         if (lancer_chip(adapter))
3961                 status = lancer_fw_download(adapter, fw);
3962         else
3963                 status = be_fw_download(adapter, fw);
3964
3965         if (!status)
3966                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3967                                   adapter->fw_on_flash);
3968
3969 fw_exit:
3970         release_firmware(fw);
3971         return status;
3972 }
3973
3974 static int be_ndo_bridge_setlink(struct net_device *dev,
3975                                     struct nlmsghdr *nlh)
3976 {
3977         struct be_adapter *adapter = netdev_priv(dev);
3978         struct nlattr *attr, *br_spec;
3979         int rem;
3980         int status = 0;
3981         u16 mode = 0;
3982
3983         if (!sriov_enabled(adapter))
3984                 return -EOPNOTSUPP;
3985
3986         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
3987         if (!br_spec)
                     return -EINVAL;

3988         nla_for_each_nested(attr, br_spec, rem) {
3989                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3990                         continue;
3991
3992                 mode = nla_get_u16(attr);
3993                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3994                         return -EINVAL;
3995
3996                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3997                                                adapter->if_handle,
3998                                                mode == BRIDGE_MODE_VEPA ?
3999                                                PORT_FWD_TYPE_VEPA :
4000                                                PORT_FWD_TYPE_VEB);
4001                 if (status)
4002                         goto err;
4003
4004                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4005                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4006
4007                 return status;
4008         }
4009 err:
4010         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4011                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4012
4013         return status;
4014 }
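
/* The handler above follows the usual IFLA_AF_SPEC pattern: locate the
 * nested attribute block, walk it, and act on IFLA_BRIDGE_MODE (typically
 * exercised via iproute2: "bridge link set dev <ifname> hwmode vepa|veb").
 * A minimal sketch of just the parsing skeleton (hypothetical helper, no
 * hardware command):
 */
static int example_bridge_mode_from_nlh(struct nlmsghdr *nlh, u16 *mode)
{
        struct nlattr *attr, *br_spec;
        int rem;

        br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (!br_spec)
                return -EINVAL;

        nla_for_each_nested(attr, br_spec, rem) {
                if (nla_type(attr) != IFLA_BRIDGE_MODE)
                        continue;
                *mode = nla_get_u16(attr);
                return 0;
        }
        return -EINVAL;
}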
4015
4016 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4017                                     struct net_device *dev,
4018                                     u32 filter_mask)
4019 {
4020         struct be_adapter *adapter = netdev_priv(dev);
4021         int status = 0;
4022         u8 hsw_mode;
4023
4024         if (!sriov_enabled(adapter))
4025                 return 0;
4026
4027         /* BE and Lancer chips support VEB mode only */
4028         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4029                 hsw_mode = PORT_FWD_TYPE_VEB;
4030         } else {
4031                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4032                                                adapter->if_handle, &hsw_mode);
4033                 if (status)
4034                         return 0;
4035         }
4036
4037         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4038                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4039                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4040 }
4041
4042 static const struct net_device_ops be_netdev_ops = {
4043         .ndo_open               = be_open,
4044         .ndo_stop               = be_close,
4045         .ndo_start_xmit         = be_xmit,
4046         .ndo_set_rx_mode        = be_set_rx_mode,
4047         .ndo_set_mac_address    = be_mac_addr_set,
4048         .ndo_change_mtu         = be_change_mtu,
4049         .ndo_get_stats64        = be_get_stats64,
4050         .ndo_validate_addr      = eth_validate_addr,
4051         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4052         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4053         .ndo_set_vf_mac         = be_set_vf_mac,
4054         .ndo_set_vf_vlan        = be_set_vf_vlan,
4055         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4056         .ndo_get_vf_config      = be_get_vf_config,
4057 #ifdef CONFIG_NET_POLL_CONTROLLER
4058         .ndo_poll_controller    = be_netpoll,
4059 #endif
4060         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4061         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4062 #ifdef CONFIG_NET_RX_BUSY_POLL
4063         .ndo_busy_poll          = be_busy_poll
4064 #endif
4065 };
4066
4067 static void be_netdev_init(struct net_device *netdev)
4068 {
4069         struct be_adapter *adapter = netdev_priv(netdev);
4070
4071         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4072                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4073                 NETIF_F_HW_VLAN_CTAG_TX;
4074         if (be_multi_rxq(adapter))
4075                 netdev->hw_features |= NETIF_F_RXHASH;
4076
4077         netdev->features |= netdev->hw_features |
4078                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4079
4080         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4081                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4082
4083         netdev->priv_flags |= IFF_UNICAST_FLT;
4084
4085         netdev->flags |= IFF_MULTICAST;
4086
4087         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4088
4089         netdev->netdev_ops = &be_netdev_ops;
4090
4091         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4092 }
4093
4094 static void be_unmap_pci_bars(struct be_adapter *adapter)
4095 {
4096         if (adapter->csr)
4097                 pci_iounmap(adapter->pdev, adapter->csr);
4098         if (adapter->db)
4099                 pci_iounmap(adapter->pdev, adapter->db);
4100 }
4101
4102 static int db_bar(struct be_adapter *adapter)
4103 {
4104         if (lancer_chip(adapter) || !be_physfn(adapter))
4105                 return 0;
4106         else
4107                 return 4;
4108 }
4109
4110 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4111 {
4112         if (skyhawk_chip(adapter)) {
4113                 adapter->roce_db.size = 4096;
4114                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4115                                                               db_bar(adapter));
4116                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4117                                                                db_bar(adapter));
4118         }
4119         return 0;
4120 }
4121
4122 static int be_map_pci_bars(struct be_adapter *adapter)
4123 {
4124         u8 __iomem *addr;
4125
4126         if (BEx_chip(adapter) && be_physfn(adapter)) {
4127                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4128                 if (adapter->csr == NULL)
4129                         return -ENOMEM;
4130         }
4131
4132         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4133         if (addr == NULL)
4134                 goto pci_map_err;
4135         adapter->db = addr;
4136
4137         be_roce_map_pci_bars(adapter);
4138         return 0;
4139
4140 pci_map_err:
4141         be_unmap_pci_bars(adapter);
4142         return -ENOMEM;
4143 }
4144
4145 static void be_ctrl_cleanup(struct be_adapter *adapter)
4146 {
4147         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4148
4149         be_unmap_pci_bars(adapter);
4150
4151         if (mem->va)
4152                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4153                                   mem->dma);
4154
4155         mem = &adapter->rx_filter;
4156         if (mem->va)
4157                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4158                                   mem->dma);
4159 }
4160
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *rx_filter = &adapter->rx_filter;
	u32 sli_intf;
	int status;

	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
			      SLI_INTF_FAMILY_SHIFT;
	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
	rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
					    rx_filter->size, &rx_filter->dma,
					    GFP_KERNEL);
	if (rx_filter->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->et_cmd_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

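/* Size the stats DMA buffer for the stats-command version this ASIC
 * generation speaks: v0 for BE2, v1 for BE3, v2 for everything newer;
 * Lancer uses its own pport-stats command.
 */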
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (lancer_chip(adapter))
		cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
	else if (BE2_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	else if (BE3_chip(adapter))
		cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	else
		/* ALL non-BE ASICs */
		cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

	cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				      GFP_KERNEL);
	if (cmd->va == NULL)
		return -ENOMEM;
	return 0;
}

static void be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	be_roce_dev_remove(adapter);
	be_intr_set(adapter, false);

	cancel_delayed_work_sync(&adapter->func_recovery_work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

static int be_get_initial_config(struct be_adapter *adapter)
{
	int status, level;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	/* Must be a power of 2 or else MODULO will BUG_ON */
	adapter->be_get_temp_freq = 64;

	if (BEx_chip(adapter)) {
		level = be_cmd_get_fw_log_level(adapter);
		adapter->msg_enable =
			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
	}

	adapter->cfg_num_qs = netif_get_num_default_rss_queues();
	return 0;
}

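/* Lancer error recovery: wait for the chip to report ready, then tear
 * the function down (be_close()/be_clear()) and rebuild it from scratch
 * (be_setup()/be_open()).
 */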
static int lancer_recover_func(struct be_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;
	int status;

	status = lancer_test_and_set_rdy_state(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev))
		be_close(adapter->netdev);

	be_clear(adapter);

	be_clear_all_error(adapter);

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(adapter->netdev)) {
		status = be_open(adapter->netdev);
		if (status)
			goto err;
	}

	dev_info(dev, "Adapter recovery successful\n");
	return 0;
err:
	if (status == -EAGAIN)
		dev_err(dev, "Waiting for resource provisioning\n");
	else
		dev_err(dev, "Adapter recovery failed\n");

	return status;
}

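/* Polls for errors once a second (see the reschedule at the bottom); on
 * Lancer it detaches the netdev and attempts a full function recovery
 * when a hardware error is detected.
 */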
static void be_func_recovery_task(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, func_recovery_work.work);
	int status = 0;

	be_detect_error(adapter);

	if (adapter->hw_error && lancer_chip(adapter)) {
		rtnl_lock();
		netif_device_detach(adapter->netdev);
		rtnl_unlock();

		status = lancer_recover_func(adapter);
		if (!status)
			netif_device_attach(adapter->netdev);
	}

	/* On Lancer, errors other than a provisioning error (-EAGAIN) are
	 * fatal; don't attempt further recovery for them.
	 */
	if (!status || status == -EAGAIN)
		schedule_delayed_work(&adapter->func_recovery_work,
				      msecs_to_jiffies(1000));
}

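/* Periodic (1s) housekeeping: reap MCC completions while interrupts are
 * not yet enabled, refresh stats, read the die temperature every
 * be_get_temp_freq iterations, replenish starved RX queues and update
 * EQ delays.
 */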
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* When interrupts are not yet enabled, just reap any pending
	 * mcc completions.
	 */
	if (!netif_running(adapter->netdev)) {
		local_bh_disable();
		be_process_mcc(adapter);
		local_bh_enable();
		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						   &adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	if (be_physfn(adapter) &&
	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		/* Replenish RX-queues starved due to memory
		 * allocation failures.
		 */
		if (rxo->rx_post_starved)
			be_post_rx_frags(rxo, GFP_KERNEL);
	}

	be_eqd_update(adapter);

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
	return pci_num_vf(adapter->pdev) == 0;
}

static char *mc_name(struct be_adapter *adapter)
{
	if (adapter->function_mode & FLEX10_MODE)
		return "FLEX10";
	else if (adapter->function_mode & VNIC_MODE)
		return "vNIC";
	else if (adapter->function_mode & UMC_ENABLED)
		return "UMC";
	else
		return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
	return be_physfn(adapter) ? "PF" : "VF";
}

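/* Bring-up order: PCI enable -> netdev alloc -> DMA mask -> BAR/mailbox
 * init -> FW ready sync and reset -> stats -> be_setup() ->
 * register_netdev().  The error labels at the bottom unwind these steps
 * in reverse.
 */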
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;
	char port_name;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask_and_coherent(&pdev->dev,
						   DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	if (be_physfn(adapter)) {
		status = pci_enable_pcie_error_reporting(pdev);
		if (!status)
			dev_info(&pdev->dev, "PCIe error reporting enabled\n");
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_fw_wait_ready(adapter);
		if (status)
			goto ctrl_clean;
	}

	if (be_reset_required(adapter)) {
		status = be_cmd_reset_function(adapter);
		if (status)
			goto ctrl_clean;

		/* Wait for interrupts to quiesce after an FLR */
		msleep(100);
	}

	/* Allow interrupts for other ULPs running on NIC function */
	be_intr_set(adapter, true);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_initial_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);
	INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
	adapter->rx_fc = adapter->tx_fc = true;

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	be_roce_dev_add(adapter);

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));

	be_cmd_query_port_name(adapter, &port_name);

	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
		 func_name(adapter), mc_name(adapter), port_name);

	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	free_netdev(netdev);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

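/* Legacy PCI PM entry point: arm wake-on-LAN if enabled, quiesce the
 * function and let the PCI core choose the low-power state.
 */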
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (adapter->wol_en)
		be_setup_wol(adapter, true);

	be_intr_set(adapter, false);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

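/* Mirror image of be_suspend(): restore PCI state, wait for FW
 * readiness, rebuild the function and re-attach the netdev.
 */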
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	status = be_fw_wait_ready(adapter);
	if (status)
		return status;

	be_intr_set(adapter, true);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);

	if (adapter->wol_en)
		be_setup_wol(adapter, false);

	return 0;
}

/*
 * An FLR (issued below via be_cmd_reset_function()) will stop BE from
 * DMAing any further data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);
	cancel_delayed_work_sync(&adapter->func_recovery_work);

	netif_device_detach(adapter->netdev);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

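/* EEH/AER recovery flow: the PCI core invokes .error_detected, then
 * .slot_reset and finally .resume (wired up in be_eeh_handlers below).
 */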
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
					    pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	if (!adapter->eeh_error) {
		adapter->eeh_error = true;

		cancel_delayed_work_sync(&adapter->func_recovery_work);

		rtnl_lock();
		netif_device_detach(netdev);
		if (netif_running(netdev))
			be_close(netdev);
		rtnl_unlock();

		be_clear(adapter);
	}

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	/* The error could cause the FW to trigger a flash debug dump.
	 * Resetting the card while flash dump is in progress
	 * can cause it not to recover; wait for it to finish.
	 * Wait only for first function as it is needed only once per
	 * adapter.
	 */
	if (pdev->devfn == 0)
		ssleep(30);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	dev_info(&adapter->pdev->dev,
		 "Waiting for FW to be ready after EEH reset\n");
	status = be_fw_wait_ready(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_cleanup_aer_uncorrect_error_status(pdev);
	be_clear_all_error(adapter);
	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	status = be_cmd_reset_function(adapter);
	if (status)
		goto err;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}

	schedule_delayed_work(&adapter->func_recovery_work,
			      msecs_to_jiffies(1000));
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);
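
/* A minimal load-time sketch (hypothetical host; assumes the module is
 * built as be2net, i.e. DRV_NAME):
 *
 *   modprobe be2net rx_frag_size=4096
 *
 * rx_frag_size values other than 2048/4096/8192 are coerced back to
 * 2048 by be_init_module() above.
 */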

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);