/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
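/* Example (hypothetical values): "modprobe be2net num_vfs=4 rx_frag_size=4096"
 * would enable 4 VFs and use 4KB receive fragments; both parameters are
 * read-only via sysfs (S_IRUGO) once the module is loaded.
 */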

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

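/* Doorbell helpers: each notify routine below builds a 32-bit doorbell value
 * by masking the ring id into the low bits and shifting in the count of
 * entries posted (RQ/TXQ) or popped (EQ/CQ), then writes it to the adapter's
 * doorbell BAR. The wmb() before the RQ/TXQ writes ensures the descriptor
 * updates are visible to the device before it is notified.
 */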
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

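/* MAC change sequence: program the new MAC (PMAC_ADD), delete the old entry
 * if a new pmac_id was assigned, then read back the active MAC from the FW
 * to confirm the change actually took effect (a VF without the FILTMGMT
 * privilege may be silently refused).
 */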
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed only if the user-provided MAC differs from the
         * active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have the FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK, only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* BE2 uses the v0 stats cmd; BE3 uses v1; later chips use v2 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 uses the v0 stats layout; BE3 uses v1; later chips use v2 */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

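/* Accumulate a 16-bit HW counter into a 32-bit SW counter, detecting
 * wrap-around. Worked example: if *acc == 0x0001FFF0 and the HW now reports
 * val == 0x0005, then val < lo(*acc), so the counter wrapped;
 * newacc = 0x00010000 + 0x0005 + 65536 = 0x00020005.
 */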
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* this erx HW counter wraps around after 65535; the
                 * driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        /* for BE3 */
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* the v2 erx struct is a superset of v0/v1, so it can be
                 * used for v0/v1 access as well
                 */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
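/* Example: an skb with a linear header and 2 page frags needs 1 + 2 data
 * WRBs plus the hdr WRB = 4 (even, no dummy); with only 1 frag the count
 * would be 3, so a dummy WRB is added on BE2/BE3 to keep the count even
 * (Lancer has no such requirement).
 */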
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

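/* Derive the VLAN tag for a TX skb: if the 802.1p priority requested by the
 * stack is not in the adapter's allowed priority bitmap, the tag's priority
 * bits are rewritten with the FW-recommended priority while the VID is
 * preserved.
 */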
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

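/* Build the WRB chain for an skb: the hdr WRB is reserved first, then one
 * data WRB is filled per DMA-mapped piece (linear part plus each page frag),
 * optionally followed by a dummy WRB. On a mapping failure the queue head is
 * rewound and all previously mapped fragments are unmapped.
 */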
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 informs the f/w
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* The Lancer and SH-R ASICs have a bug wherein packets that are
         * 32 bytes or less may cause a transmit stall on that port. So the
         * work-around is to pad such packets (<= 32 bytes) to a 36-byte
         * length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
            (adapter->pvid || adapter->qnq_vid) &&
            !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 0);

        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                        }
                }
        }

        return status;

set_vlan_promisc:
        if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
                return 0;

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;

        status = be_vid_config(adapter);
        if (status) {
                adapter->vlans_added--;
                adapter->vlan_tag[vid] = 0;
        }
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        status = be_vid_config(adapter);
        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan)
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
        } else {
                /* Reset Transparent VLAN Tagging. */
                status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
                                               vf + 1, vf_cfg->if_handle, 0);
        }

        if (!status)
                vf_cfg->vlan_tag = vlan;
        else
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}
1345
1346 static void be_eqd_update(struct be_adapter *adapter)
1347 {
1348         struct be_set_eqd set_eqd[MAX_EVT_QS];
1349         int eqd, i, num = 0, start;
1350         struct be_aic_obj *aic;
1351         struct be_eq_obj *eqo;
1352         struct be_rx_obj *rxo;
1353         struct be_tx_obj *txo;
1354         u64 rx_pkts, tx_pkts;
1355         ulong now;
1356         u32 pps, delta;
1357
1358         for_all_evt_queues(adapter, eqo, i) {
1359                 aic = &adapter->aic_obj[eqo->idx];
1360                 if (!aic->enable) {
1361                         if (aic->jiffies)
1362                                 aic->jiffies = 0;
1363                         eqd = aic->et_eqd;
1364                         goto modify_eqd;
1365                 }
1366
1367                 rxo = &adapter->rx_obj[eqo->idx];
1368                 do {
1369                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1370                         rx_pkts = rxo->stats.rx_pkts;
1371                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1372
1373                 txo = &adapter->tx_obj[eqo->idx];
1374                 do {
1375                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1376                         tx_pkts = txo->stats.tx_reqs;
1377                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1378
1379
1380                 /* Skip, if wrapped around or first calculation */
1381                 now = jiffies;
1382                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1383                     rx_pkts < aic->rx_pkts_prev ||
1384                     tx_pkts < aic->tx_reqs_prev) {
1385                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1386                         continue;
1387                 }
1388
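                 /* A worked example of the heuristic below, assuming the last
                  * sample was 500 ms ago with rx/tx deltas of 100k/50k pkts:
                  * pps = 200000 + 100000 = 300000, so
                  * eqd = (300000 / 15000) << 2 = 80, clamped to
                  * [aic->min_eqd, aic->max_eqd]. Values under 8 disable
                  * coalescing (eqd = 0), and the programmed delay_multiplier
                  * is eqd * 65 / 100.
                  */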
1389                 delta = jiffies_to_msecs(now - aic->jiffies);
1390                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1391                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1392                 eqd = (pps / 15000) << 2;
1393
1394                 if (eqd < 8)
1395                         eqd = 0;
1396                 eqd = min_t(u32, eqd, aic->max_eqd);
1397                 eqd = max_t(u32, eqd, aic->min_eqd);
1398
1399                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1400 modify_eqd:
1401                 if (eqd != aic->prev_eqd) {
1402                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1403                         set_eqd[num].eq_id = eqo->q.id;
1404                         aic->prev_eqd = eqd;
1405                         num++;
1406                 }
1407         }
1408
1409         if (num)
1410                 be_cmd_modify_eqd(adapter, set_eqd, num);
1411 }
1412
1413 static void be_rx_stats_update(struct be_rx_obj *rxo,
1414                 struct be_rx_compl_info *rxcp)
1415 {
1416         struct be_rx_stats *stats = rx_stats(rxo);
1417
1418         u64_stats_update_begin(&stats->sync);
1419         stats->rx_compl++;
1420         stats->rx_bytes += rxcp->pkt_size;
1421         stats->rx_pkts++;
1422         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1423                 stats->rx_mcast_pkts++;
1424         if (rxcp->err)
1425                 stats->rx_compl_err++;
1426         u64_stats_update_end(&stats->sync);
1427 }
1428
1429 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1430 {
1431         /* L4 checksum is not reliable for non-TCP/UDP packets.
1432          * Also ignore ipcksm for IPv6 pkts */
1433         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1434                                 (rxcp->ip_csum || rxcp->ipv6);
1435 }
1436
1437 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1438 {
1439         struct be_adapter *adapter = rxo->adapter;
1440         struct be_rx_page_info *rx_page_info;
1441         struct be_queue_info *rxq = &rxo->q;
1442         u16 frag_idx = rxq->tail;
1443
1444         rx_page_info = &rxo->page_info_tbl[frag_idx];
1445         BUG_ON(!rx_page_info->page);
1446
1447         if (rx_page_info->last_page_user) {
1448                 dma_unmap_page(&adapter->pdev->dev,
1449                                dma_unmap_addr(rx_page_info, bus),
1450                                adapter->big_page_size, DMA_FROM_DEVICE);
1451                 rx_page_info->last_page_user = false;
1452         }
1453
1454         queue_tail_inc(rxq);
1455         atomic_dec(&rxq->used);
1456         return rx_page_info;
1457 }
1458
1459 /* Throw away the data in the Rx completion */
1460 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1461                                 struct be_rx_compl_info *rxcp)
1462 {
1463         struct be_rx_page_info *page_info;
1464         u16 i, num_rcvd = rxcp->num_rcvd;
1465
1466         for (i = 0; i < num_rcvd; i++) {
1467                 page_info = get_rx_page_info(rxo);
1468                 put_page(page_info->page);
1469                 memset(page_info, 0, sizeof(*page_info));
1470         }
1471 }
1472
1473 /*
1474  * skb_fill_rx_data forms a complete skb for an ether frame
1475  * indicated by rxcp.
1476  */
1477 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1478                              struct be_rx_compl_info *rxcp)
1479 {
1480         struct be_rx_page_info *page_info;
1481         u16 i, j;
1482         u16 hdr_len, curr_frag_len, remaining;
1483         u8 *start;
1484
1485         page_info = get_rx_page_info(rxo);
1486         start = page_address(page_info->page) + page_info->page_offset;
1487         prefetch(start);
1488
1489         /* Copy data in the first descriptor of this completion */
1490         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1491
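         /* Two cases follow: a "tiny" frame (<= BE_HDR_LEN) is copied
          * entirely into the skb linear area and the page released;
          * otherwise only the Ethernet header is copied and the rest of the
          * first fragment is attached as a page frag. E.g. for a 1500-byte
          * frame with rx_frag_size 2048: 14 header bytes land in skb->data
          * and the remaining 1486 bytes stay in the page.
          */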
1492         skb->len = curr_frag_len;
1493         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1494                 memcpy(skb->data, start, curr_frag_len);
1495                 /* Complete packet has now been moved to data */
1496                 put_page(page_info->page);
1497                 skb->data_len = 0;
1498                 skb->tail += curr_frag_len;
1499         } else {
1500                 hdr_len = ETH_HLEN;
1501                 memcpy(skb->data, start, hdr_len);
1502                 skb_shinfo(skb)->nr_frags = 1;
1503                 skb_frag_set_page(skb, 0, page_info->page);
1504                 skb_shinfo(skb)->frags[0].page_offset =
1505                                         page_info->page_offset + hdr_len;
1506                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1507                 skb->data_len = curr_frag_len - hdr_len;
1508                 skb->truesize += rx_frag_size;
1509                 skb->tail += hdr_len;
1510         }
1511         page_info->page = NULL;
1512
1513         if (rxcp->pkt_size <= rx_frag_size) {
1514                 BUG_ON(rxcp->num_rcvd != 1);
1515                 return;
1516         }
1517
1518         /* More frags present for this completion */
1519         remaining = rxcp->pkt_size - curr_frag_len;
1520         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1521                 page_info = get_rx_page_info(rxo);
1522                 curr_frag_len = min(remaining, rx_frag_size);
1523
1524                 /* Coalesce all frags from the same physical page in one slot */
1525                 if (page_info->page_offset == 0) {
1526                         /* Fresh page */
1527                         j++;
1528                         skb_frag_set_page(skb, j, page_info->page);
1529                         skb_shinfo(skb)->frags[j].page_offset =
1530                                                         page_info->page_offset;
1531                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1532                         skb_shinfo(skb)->nr_frags++;
1533                 } else {
1534                         put_page(page_info->page);
1535                 }
1536
1537                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1538                 skb->len += curr_frag_len;
1539                 skb->data_len += curr_frag_len;
1540                 skb->truesize += rx_frag_size;
1541                 remaining -= curr_frag_len;
1542                 page_info->page = NULL;
1543         }
1544         BUG_ON(j > MAX_SKB_FRAGS);
1545 }
1546
1547 /* Process the RX completion indicated by rxcp when GRO is disabled */
1548 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1549                                 struct be_rx_compl_info *rxcp)
1550 {
1551         struct be_adapter *adapter = rxo->adapter;
1552         struct net_device *netdev = adapter->netdev;
1553         struct sk_buff *skb;
1554
1555         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1556         if (unlikely(!skb)) {
1557                 rx_stats(rxo)->rx_drops_no_skbs++;
1558                 be_rx_compl_discard(rxo, rxcp);
1559                 return;
1560         }
1561
1562         skb_fill_rx_data(rxo, skb, rxcp);
1563
1564         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1565                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1566         else
1567                 skb_checksum_none_assert(skb);
1568
1569         skb->protocol = eth_type_trans(skb, netdev);
1570         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1571         if (netdev->features & NETIF_F_RXHASH)
1572                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1573         skb_mark_napi_id(skb, napi);
1574
1575         if (rxcp->vlanf)
1576                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1577
1578         netif_receive_skb(skb);
1579 }
1580
1581 /* Process the RX completion indicated by rxcp when GRO is enabled */
1582 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1583                                     struct napi_struct *napi,
1584                                     struct be_rx_compl_info *rxcp)
1585 {
1586         struct be_adapter *adapter = rxo->adapter;
1587         struct be_rx_page_info *page_info;
1588         struct sk_buff *skb = NULL;
1589         u16 remaining, curr_frag_len;
1590         u16 i, j;
1591
1592         skb = napi_get_frags(napi);
1593         if (!skb) {
1594                 be_rx_compl_discard(rxo, rxcp);
1595                 return;
1596         }
1597
1598         remaining = rxcp->pkt_size;
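         /* Note: j is a u16, so initialising it to -1 wraps to 0xffff and
          * the first "fresh page" j++ below lands on frag index 0.
          */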
1599         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1600                 page_info = get_rx_page_info(rxo);
1601
1602                 curr_frag_len = min(remaining, rx_frag_size);
1603
1604                 /* Coalesce all frags from the same physical page in one slot */
1605                 if (i == 0 || page_info->page_offset == 0) {
1606                         /* First frag or Fresh page */
1607                         j++;
1608                         skb_frag_set_page(skb, j, page_info->page);
1609                         skb_shinfo(skb)->frags[j].page_offset =
1610                                                         page_info->page_offset;
1611                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1612                 } else {
1613                         put_page(page_info->page);
1614                 }
1615                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1616                 skb->truesize += rx_frag_size;
1617                 remaining -= curr_frag_len;
1618                 memset(page_info, 0, sizeof(*page_info));
1619         }
1620         BUG_ON(j > MAX_SKB_FRAGS);
1621
1622         skb_shinfo(skb)->nr_frags = j + 1;
1623         skb->len = rxcp->pkt_size;
1624         skb->data_len = rxcp->pkt_size;
1625         skb->ip_summed = CHECKSUM_UNNECESSARY;
1626         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1627         if (adapter->netdev->features & NETIF_F_RXHASH)
1628                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1629         skb_mark_napi_id(skb, napi);
1630
1631         if (rxcp->vlanf)
1632                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1633
1634         napi_gro_frags(napi);
1635 }
1636
1637 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1638                                  struct be_rx_compl_info *rxcp)
1639 {
1640         rxcp->pkt_size =
1641                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1642         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1643         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1644         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1645         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1646         rxcp->ip_csum =
1647                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1648         rxcp->l4_csum =
1649                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1650         rxcp->ipv6 =
1651                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1652         rxcp->num_rcvd =
1653                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1654         rxcp->pkt_type =
1655                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1656         rxcp->rss_hash =
1657                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1658         if (rxcp->vlanf) {
1659                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1660                                           compl);
1661                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1662                                                compl);
1663         }
1664         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1665 }
1666
1667 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1668                                  struct be_rx_compl_info *rxcp)
1669 {
1670         rxcp->pkt_size =
1671                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1672         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1673         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1674         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1675         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1676         rxcp->ip_csum =
1677                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1678         rxcp->l4_csum =
1679                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1680         rxcp->ipv6 =
1681                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1682         rxcp->num_rcvd =
1683                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1684         rxcp->pkt_type =
1685                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1686         rxcp->rss_hash =
1687                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1688         if (rxcp->vlanf) {
1689                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1690                                           compl);
1691                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1692                                                compl);
1693         }
1694         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1695         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1696                                       ip_frag, compl);
1697 }
1698
1699 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1700 {
1701         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1702         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1703         struct be_adapter *adapter = rxo->adapter;
1704
1705         /* For checking the valid bit, it is OK to use either definition, as the
1706          * valid bit is at the same position in both v0 and v1 Rx compls */
1707         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1708                 return NULL;
1709
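         /* Read barrier: don't load the rest of the completion entry until
          * the valid bit has been observed set.
          */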
1710         rmb();
1711         be_dws_le_to_cpu(compl, sizeof(*compl));
1712
1713         if (adapter->be3_native)
1714                 be_parse_rx_compl_v1(compl, rxcp);
1715         else
1716                 be_parse_rx_compl_v0(compl, rxcp);
1717
1718         if (rxcp->ip_frag)
1719                 rxcp->l4_csum = 0;
1720
1721         if (rxcp->vlanf) {
1722                 /* vlanf could be wrongly set in some cards;
1723                  * ignore it if vtm is not set */
1724                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1725                         rxcp->vlanf = 0;
1726
1727                 if (!lancer_chip(adapter))
1728                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1729
1730                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1731                     !adapter->vlan_tag[rxcp->vlan_tag])
1732                         rxcp->vlanf = 0;
1733         }
1734
1735         /* As the compl has been parsed, reset it; we won't touch it again */
1736         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1737
1738         queue_tail_inc(&rxo->cq);
1739         return rxcp;
1740 }
1741
1742 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1743 {
1744         u32 order = get_order(size);
1745
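         /* High-order allocations are made compound so that the per-fragment
          * get_page()/put_page() calls elsewhere in this file correctly
          * manage the refcount of the whole allocation.
          */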
1746         if (order > 0)
1747                 gfp |= __GFP_COMP;
1748         return  alloc_pages(gfp, order);
1749 }
1750
1751 /*
1752  * Allocate a page, split it to fragments of size rx_frag_size and post as
1753  * receive buffers to BE
1754  */
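/* A concrete example of the scheme below, assuming 4 KB pages and the
 * default rx_frag_size of 2048: big_page_size is 4096, so each page is
 * carved into two fragments posted as separate RX descriptors. Only the
 * descriptor holding the final fragment of a page is marked
 * last_page_user, so the page is DMA-unmapped exactly once, when that
 * fragment is reaped in get_rx_page_info().
 */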
1755 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1756 {
1757         struct be_adapter *adapter = rxo->adapter;
1758         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1759         struct be_queue_info *rxq = &rxo->q;
1760         struct page *pagep = NULL;
1761         struct device *dev = &adapter->pdev->dev;
1762         struct be_eth_rx_d *rxd;
1763         u64 page_dmaaddr = 0, frag_dmaaddr;
1764         u32 posted, page_offset = 0;
1765
1766         page_info = &rxo->page_info_tbl[rxq->head];
1767         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1768                 if (!pagep) {
1769                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1770                         if (unlikely(!pagep)) {
1771                                 rx_stats(rxo)->rx_post_fail++;
1772                                 break;
1773                         }
1774                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1775                                                     adapter->big_page_size,
1776                                                     DMA_FROM_DEVICE);
1777                         if (dma_mapping_error(dev, page_dmaaddr)) {
1778                                 put_page(pagep);
1779                                 pagep = NULL;
1780                                 rx_stats(rxo)->rx_post_fail++;
1781                                 break;
1782                         }
1783                         page_info->page_offset = 0;
1784                 } else {
1785                         get_page(pagep);
1786                         page_info->page_offset = page_offset + rx_frag_size;
1787                 }
1788                 page_offset = page_info->page_offset;
1789                 page_info->page = pagep;
1790                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1791                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1792
1793                 rxd = queue_head_node(rxq);
1794                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1795                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1796
1797                 /* Any space left in the current big page for another frag? */
1798                 if ((page_offset + rx_frag_size + rx_frag_size) >
1799                                         adapter->big_page_size) {
1800                         pagep = NULL;
1801                         page_info->last_page_user = true;
1802                 }
1803
1804                 prev_page_info = page_info;
1805                 queue_head_inc(rxq);
1806                 page_info = &rxo->page_info_tbl[rxq->head];
1807         }
1808         if (pagep)
1809                 prev_page_info->last_page_user = true;
1810
1811         if (posted) {
1812                 atomic_add(posted, &rxq->used);
1813                 if (rxo->rx_post_starved)
1814                         rxo->rx_post_starved = false;
1815                 be_rxq_notify(adapter, rxq->id, posted);
1816         } else if (atomic_read(&rxq->used) == 0) {
1817                 /* Let be_worker replenish when memory is available */
1818                 rxo->rx_post_starved = true;
1819         }
1820 }
1821
1822 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1823 {
1824         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1825
1826         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1827                 return NULL;
1828
1829         rmb();
1830         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1831
1832         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1833
1834         queue_tail_inc(tx_cq);
1835         return txcp;
1836 }
1837
1838 static u16 be_tx_compl_process(struct be_adapter *adapter,
1839                 struct be_tx_obj *txo, u16 last_index)
1840 {
1841         struct be_queue_info *txq = &txo->q;
1842         struct be_eth_wrb *wrb;
1843         struct sk_buff **sent_skbs = txo->sent_skb_list;
1844         struct sk_buff *sent_skb;
1845         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1846         bool unmap_skb_hdr = true;
1847
1848         sent_skb = sent_skbs[txq->tail];
1849         BUG_ON(!sent_skb);
1850         sent_skbs[txq->tail] = NULL;
1851
1852         /* skip header wrb */
1853         queue_tail_inc(txq);
1854
1855         do {
1856                 cur_index = txq->tail;
1857                 wrb = queue_tail_node(txq);
1858                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1859                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1860                 unmap_skb_hdr = false;
1861
1862                 num_wrbs++;
1863                 queue_tail_inc(txq);
1864         } while (cur_index != last_index);
1865
1866         kfree_skb(sent_skb);
1867         return num_wrbs;
1868 }
1869
1870 /* Return the number of events in the event queue */
1871 static inline int events_get(struct be_eq_obj *eqo)
1872 {
1873         struct be_eq_entry *eqe;
1874         int num = 0;
1875
1876         do {
1877                 eqe = queue_tail_node(&eqo->q);
1878                 if (eqe->evt == 0)
1879                         break;
1880
1881                 rmb();
1882                 eqe->evt = 0;
1883                 num++;
1884                 queue_tail_inc(&eqo->q);
1885         } while (true);
1886
1887         return num;
1888 }
1889
1890 /* Leaves the EQ in a disarmed state */
1891 static void be_eq_clean(struct be_eq_obj *eqo)
1892 {
1893         int num = events_get(eqo);
1894
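         /* Assuming be_eq_notify()'s arguments are (adapter, qid, arm,
          * clear_int, num_popped), consistent with its use throughout this
          * file: this acks the pending events and clears the interrupt
          * without re-arming, leaving the EQ disarmed.
          */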
1895         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1896 }
1897
1898 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1899 {
1900         struct be_rx_page_info *page_info;
1901         struct be_queue_info *rxq = &rxo->q;
1902         struct be_queue_info *rx_cq = &rxo->cq;
1903         struct be_rx_compl_info *rxcp;
1904         struct be_adapter *adapter = rxo->adapter;
1905         int flush_wait = 0;
1906
1907         /* Consume pending rx completions.
1908          * Wait for the flush completion (identified by zero num_rcvd)
1909          * to arrive. Notify the CQ even when there are no more CQ entries
1910          * so that HW can flush its partially coalesced CQ entries.
1911          * On Lancer, there is no need to wait for the flush compl.
1912          */
1913         for (;;) {
1914                 rxcp = be_rx_compl_get(rxo);
1915                 if (rxcp == NULL) {
1916                         if (lancer_chip(adapter))
1917                                 break;
1918
1919                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1920                                 dev_warn(&adapter->pdev->dev,
1921                                          "did not receive flush compl\n");
1922                                 break;
1923                         }
1924                         be_cq_notify(adapter, rx_cq->id, true, 0);
1925                         mdelay(1);
1926                 } else {
1927                         be_rx_compl_discard(rxo, rxcp);
1928                         be_cq_notify(adapter, rx_cq->id, false, 1);
1929                         if (rxcp->num_rcvd == 0)
1930                                 break;
1931                 }
1932         }
1933
1934         /* After cleanup, leave the CQ in unarmed state */
1935         be_cq_notify(adapter, rx_cq->id, false, 0);
1936
1937         /* Then free posted rx buffers that were not used */
1938         while (atomic_read(&rxq->used) > 0) {
1939                 page_info = get_rx_page_info(rxo);
1940                 put_page(page_info->page);
1941                 memset(page_info, 0, sizeof(*page_info));
1942         }
1943         BUG_ON(atomic_read(&rxq->used));
1944         rxq->tail = rxq->head = 0;
1945 }
1946
1947 static void be_tx_compl_clean(struct be_adapter *adapter)
1948 {
1949         struct be_tx_obj *txo;
1950         struct be_queue_info *txq;
1951         struct be_eth_tx_compl *txcp;
1952         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1953         struct sk_buff *sent_skb;
1954         bool dummy_wrb;
1955         int i, pending_txqs;
1956
1957         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1958         do {
1959                 pending_txqs = adapter->num_tx_qs;
1960
1961                 for_all_tx_queues(adapter, txo, i) {
1962                         txq = &txo->q;
1963                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1964                                 end_idx =
1965                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1966                                                       wrb_index, txcp);
1967                                 num_wrbs += be_tx_compl_process(adapter, txo,
1968                                                                 end_idx);
1969                                 cmpl++;
1970                         }
1971                         if (cmpl) {
1972                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1973                                 atomic_sub(num_wrbs, &txq->used);
1974                                 cmpl = 0;
1975                                 num_wrbs = 0;
1976                         }
1977                         if (atomic_read(&txq->used) == 0)
1978                                 pending_txqs--;
1979                 }
1980
1981                 if (pending_txqs == 0 || ++timeo > 200)
1982                         break;
1983
1984                 mdelay(1);
1985         } while (true);
1986
1987         for_all_tx_queues(adapter, txo, i) {
1988                 txq = &txo->q;
1989                 if (atomic_read(&txq->used))
1990                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1991                                 atomic_read(&txq->used));
1992
1993                 /* free posted tx for which compls will never arrive */
1994                 while (atomic_read(&txq->used)) {
1995                         sent_skb = txo->sent_skb_list[txq->tail];
1996                         end_idx = txq->tail;
1997                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1998                                                    &dummy_wrb);
1999                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2000                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2001                         atomic_sub(num_wrbs, &txq->used);
2002                 }
2003         }
2004 }
2005
2006 static void be_evt_queues_destroy(struct be_adapter *adapter)
2007 {
2008         struct be_eq_obj *eqo;
2009         int i;
2010
2011         for_all_evt_queues(adapter, eqo, i) {
2012                 if (eqo->q.created) {
2013                         be_eq_clean(eqo);
2014                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2015                         napi_hash_del(&eqo->napi);
2016                         netif_napi_del(&eqo->napi);
2017                 }
2018                 be_queue_free(adapter, &eqo->q);
2019         }
2020 }
2021
2022 static int be_evt_queues_create(struct be_adapter *adapter)
2023 {
2024         struct be_queue_info *eq;
2025         struct be_eq_obj *eqo;
2026         struct be_aic_obj *aic;
2027         int i, rc;
2028
2029         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2030                                     adapter->cfg_num_qs);
2031
2032         for_all_evt_queues(adapter, eqo, i) {
2033                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2034                                BE_NAPI_WEIGHT);
2035                 napi_hash_add(&eqo->napi);
2036                 aic = &adapter->aic_obj[i];
2037                 eqo->adapter = adapter;
2038                 eqo->tx_budget = BE_TX_BUDGET;
2039                 eqo->idx = i;
2040                 aic->max_eqd = BE_MAX_EQD;
2041                 aic->enable = true;
2042
2043                 eq = &eqo->q;
2044                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2045                                         sizeof(struct be_eq_entry));
2046                 if (rc)
2047                         return rc;
2048
2049                 rc = be_cmd_eq_create(adapter, eqo);
2050                 if (rc)
2051                         return rc;
2052         }
2053         return 0;
2054 }
2055
2056 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2057 {
2058         struct be_queue_info *q;
2059
2060         q = &adapter->mcc_obj.q;
2061         if (q->created)
2062                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2063         be_queue_free(adapter, q);
2064
2065         q = &adapter->mcc_obj.cq;
2066         if (q->created)
2067                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2068         be_queue_free(adapter, q);
2069 }
2070
2071 /* Must be called only after TX qs are created as MCC shares TX EQ */
2072 static int be_mcc_queues_create(struct be_adapter *adapter)
2073 {
2074         struct be_queue_info *q, *cq;
2075
2076         cq = &adapter->mcc_obj.cq;
2077         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2078                         sizeof(struct be_mcc_compl)))
2079                 goto err;
2080
2081         /* Use the default EQ for MCC completions */
2082         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2083                 goto mcc_cq_free;
2084
2085         q = &adapter->mcc_obj.q;
2086         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2087                 goto mcc_cq_destroy;
2088
2089         if (be_cmd_mccq_create(adapter, q, cq))
2090                 goto mcc_q_free;
2091
2092         return 0;
2093
2094 mcc_q_free:
2095         be_queue_free(adapter, q);
2096 mcc_cq_destroy:
2097         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2098 mcc_cq_free:
2099         be_queue_free(adapter, cq);
2100 err:
2101         return -1;
2102 }
2103
2104 static void be_tx_queues_destroy(struct be_adapter *adapter)
2105 {
2106         struct be_queue_info *q;
2107         struct be_tx_obj *txo;
2108         u8 i;
2109
2110         for_all_tx_queues(adapter, txo, i) {
2111                 q = &txo->q;
2112                 if (q->created)
2113                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2114                 be_queue_free(adapter, q);
2115
2116                 q = &txo->cq;
2117                 if (q->created)
2118                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2119                 be_queue_free(adapter, q);
2120         }
2121 }
2122
2123 static int be_tx_qs_create(struct be_adapter *adapter)
2124 {
2125         struct be_queue_info *cq, *eq;
2126         struct be_tx_obj *txo;
2127         int status, i;
2128
2129         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2130
2131         for_all_tx_queues(adapter, txo, i) {
2132                 cq = &txo->cq;
2133                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2134                                         sizeof(struct be_eth_tx_compl));
2135                 if (status)
2136                         return status;
2137
2138                 u64_stats_init(&txo->stats.sync);
2139                 u64_stats_init(&txo->stats.sync_compl);
2140
2141                 /* If num_evt_qs is less than num_tx_qs, then more than
2142                  * one TXQ shares an EQ
2143                  */
2144                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2145                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2146                 if (status)
2147                         return status;
2148
2149                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2150                                         sizeof(struct be_eth_wrb));
2151                 if (status)
2152                         return status;
2153
2154                 status = be_cmd_txq_create(adapter, txo);
2155                 if (status)
2156                         return status;
2157         }
2158
2159         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2160                  adapter->num_tx_qs);
2161         return 0;
2162 }
2163
2164 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2165 {
2166         struct be_queue_info *q;
2167         struct be_rx_obj *rxo;
2168         int i;
2169
2170         for_all_rx_queues(adapter, rxo, i) {
2171                 q = &rxo->cq;
2172                 if (q->created)
2173                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2174                 be_queue_free(adapter, q);
2175         }
2176 }
2177
2178 static int be_rx_cqs_create(struct be_adapter *adapter)
2179 {
2180         struct be_queue_info *eq, *cq;
2181         struct be_rx_obj *rxo;
2182         int rc, i;
2183
2184         /* We can create as many RSS rings as there are EQs. */
2185         adapter->num_rx_qs = adapter->num_evt_qs;
2186
2187         /* We'll use RSS only if at least 2 RSS rings are supported.
2188          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2189          */
2190         if (adapter->num_rx_qs > 1)
2191                 adapter->num_rx_qs++;
2192
2193         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2194         for_all_rx_queues(adapter, rxo, i) {
2195                 rxo->adapter = adapter;
2196                 cq = &rxo->cq;
2197                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2198                                 sizeof(struct be_eth_rx_compl));
2199                 if (rc)
2200                         return rc;
2201
2202                 u64_stats_init(&rxo->stats.sync);
2203                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2204                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2205                 if (rc)
2206                         return rc;
2207         }
2208
2209         dev_info(&adapter->pdev->dev,
2210                  "created %d RSS queue(s) and 1 default RX queue\n",
2211                  adapter->num_rx_qs - 1);
2212         return 0;
2213 }
2214
2215 static irqreturn_t be_intx(int irq, void *dev)
2216 {
2217         struct be_eq_obj *eqo = dev;
2218         struct be_adapter *adapter = eqo->adapter;
2219         int num_evts = 0;
2220
2221         /* IRQ is not expected when NAPI is scheduled as the EQ
2222          * will not be armed.
2223          * But, this can happen on Lancer INTx where it takes
2224          * a while to de-assert INTx or in BE2 where occasionally
2225          * an interrupt may be raised even when the EQ is unarmed.
2226          * If NAPI is already scheduled, then counting & notifying
2227          * events will orphan them.
2228          */
2229         if (napi_schedule_prep(&eqo->napi)) {
2230                 num_evts = events_get(eqo);
2231                 __napi_schedule(&eqo->napi);
2232                 if (num_evts)
2233                         eqo->spurious_intr = 0;
2234         }
2235         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2236
2237         /* Return IRQ_HANDLED only for the first spurious intr
2238          * after a valid intr to stop the kernel from branding
2239          * this irq as a bad one!
2240          */
2241         if (num_evts || eqo->spurious_intr++ == 0)
2242                 return IRQ_HANDLED;
2243         else
2244                 return IRQ_NONE;
2245 }
2246
2247 static irqreturn_t be_msix(int irq, void *dev)
2248 {
2249         struct be_eq_obj *eqo = dev;
2250
2251         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2252         napi_schedule(&eqo->napi);
2253         return IRQ_HANDLED;
2254 }
2255
2256 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2257 {
2258         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2259 }
2260
2261 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2262                         int budget, int polling)
2263 {
2264         struct be_adapter *adapter = rxo->adapter;
2265         struct be_queue_info *rx_cq = &rxo->cq;
2266         struct be_rx_compl_info *rxcp;
2267         u32 work_done;
2268
2269         for (work_done = 0; work_done < budget; work_done++) {
2270                 rxcp = be_rx_compl_get(rxo);
2271                 if (!rxcp)
2272                         break;
2273
2274                 /* Is it a flush compl that has no data */
2275                 if (unlikely(rxcp->num_rcvd == 0))
2276                         goto loop_continue;
2277
2278                 /* Discard a compl with partial DMA (Lancer B0) */
2279                 if (unlikely(!rxcp->pkt_size)) {
2280                         be_rx_compl_discard(rxo, rxcp);
2281                         goto loop_continue;
2282                 }
2283
2284                 /* On BE, drop pkts that arrive due to imperfect filtering in
2285                  * promiscuous mode on some SKUs
2286                  */
2287                 if (unlikely(rxcp->port != adapter->port_num &&
2288                                 !lancer_chip(adapter))) {
2289                         be_rx_compl_discard(rxo, rxcp);
2290                         goto loop_continue;
2291                 }
2292
2293                 /* Don't do GRO when we're busy-polling */
2294                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2295                         be_rx_compl_process_gro(rxo, napi, rxcp);
2296                 else
2297                         be_rx_compl_process(rxo, napi, rxcp);
2298
2299 loop_continue:
2300                 be_rx_stats_update(rxo, rxcp);
2301         }
2302
2303         if (work_done) {
2304                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2305
2306                 /* When an rx-obj gets into post_starved state, just
2307                  * let be_worker do the posting.
2308                  */
2309                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2310                     !rxo->rx_post_starved)
2311                         be_post_rx_frags(rxo, GFP_ATOMIC);
2312         }
2313
2314         return work_done;
2315 }
2316
2317 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2318                           int budget, int idx)
2319 {
2320         struct be_eth_tx_compl *txcp;
2321         int num_wrbs = 0, work_done;
2322
2323         for (work_done = 0; work_done < budget; work_done++) {
2324                 txcp = be_tx_compl_get(&txo->cq);
2325                 if (!txcp)
2326                         break;
2327                 num_wrbs += be_tx_compl_process(adapter, txo,
2328                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2329                                         wrb_index, txcp));
2330         }
2331
2332         if (work_done) {
2333                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2334                 atomic_sub(num_wrbs, &txo->q.used);
2335
2336                 /* As Tx wrbs have been freed up, wake up netdev queue
2337                  * if it was stopped due to lack of tx wrbs.  */
2338                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2339                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2340                         netif_wake_subqueue(adapter->netdev, idx);
2341                 }
2342
2343                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2344                 tx_stats(txo)->tx_compl += work_done;
2345                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2346         }
2347         return (work_done < budget); /* Done */
2348 }
2349
2350 int be_poll(struct napi_struct *napi, int budget)
2351 {
2352         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2353         struct be_adapter *adapter = eqo->adapter;
2354         int max_work = 0, work, i, num_evts;
2355         struct be_rx_obj *rxo;
2356         bool tx_done;
2357
2358         num_evts = events_get(eqo);
2359
2360         /* Process all TXQs serviced by this EQ */
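         /* TXQs are striped across EQs by index: with, say, 4 EQs and
          * 8 TXQs, EQ0 services TXQ0 and TXQ4, EQ1 services TXQ1 and TXQ5,
          * and so on (i starts at eqo->idx and advances by num_evt_qs).
          */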
2361         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2362                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2363                                         eqo->tx_budget, i);
2364                 if (!tx_done)
2365                         max_work = budget;
2366         }
2367
2368         if (be_lock_napi(eqo)) {
2369                 /* This loop will iterate twice for EQ0, in which
2370                  * completions of the last RXQ (the default one) are also
2371                  * processed. For other EQs the loop iterates only once.
2372                  */
2373                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2374                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2375                         max_work = max(work, max_work);
2376                 }
2377                 be_unlock_napi(eqo);
2378         } else {
2379                 max_work = budget;
2380         }
2381
2382         if (is_mcc_eqo(eqo))
2383                 be_process_mcc(adapter);
2384
2385         if (max_work < budget) {
2386                 napi_complete(napi);
2387                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2388         } else {
2389                 /* As we'll continue in polling mode, count and clear events */
2390                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2391         }
2392         return max_work;
2393 }
2394
2395 #ifdef CONFIG_NET_RX_BUSY_POLL
2396 static int be_busy_poll(struct napi_struct *napi)
2397 {
2398         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2399         struct be_adapter *adapter = eqo->adapter;
2400         struct be_rx_obj *rxo;
2401         int i, work = 0;
2402
2403         if (!be_lock_busy_poll(eqo))
2404                 return LL_FLUSH_BUSY;
2405
2406         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2407                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2408                 if (work)
2409                         break;
2410         }
2411
2412         be_unlock_busy_poll(eqo);
2413         return work;
2414 }
2415 #endif
2416
2417 void be_detect_error(struct be_adapter *adapter)
2418 {
2419         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2420         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2421         u32 i;
2422
2423         if (be_hw_error(adapter))
2424                 return;
2425
2426         if (lancer_chip(adapter)) {
2427                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2428                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2429                         sliport_err1 = ioread32(adapter->db +
2430                                         SLIPORT_ERROR1_OFFSET);
2431                         sliport_err2 = ioread32(adapter->db +
2432                                         SLIPORT_ERROR2_OFFSET);
2433                 }
2434         } else {
2435                 pci_read_config_dword(adapter->pdev,
2436                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2437                 pci_read_config_dword(adapter->pdev,
2438                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2439                 pci_read_config_dword(adapter->pdev,
2440                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2441                 pci_read_config_dword(adapter->pdev,
2442                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2443
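                 /* Bits set in the mask registers mark UE sources to ignore;
                  * clear them here so only unmasked errors are reported
                  * bit-by-bit below.
                  */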
2444                 ue_lo = (ue_lo & ~ue_lo_mask);
2445                 ue_hi = (ue_hi & ~ue_hi_mask);
2446         }
2447
2448         /* On certain platforms BE hardware can indicate spurious UEs.
2449          * In case of a real UE the h/w stops working on its own anyway,
2450          * so hw_error is deliberately not set on UE detection.
2451          */
2452         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2453                 adapter->hw_error = true;
2454                 /* Do not log error messages if it's a FW reset */
2455                 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2456                     sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2457                         dev_info(&adapter->pdev->dev,
2458                                  "Firmware update in progress\n");
2459                         return;
2460                 } else {
2461                         dev_err(&adapter->pdev->dev,
2462                                 "Error detected in the card\n");
2463                 }
2464         }
2465
2466         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2467                 dev_err(&adapter->pdev->dev,
2468                         "ERR: sliport status 0x%x\n", sliport_status);
2469                 dev_err(&adapter->pdev->dev,
2470                         "ERR: sliport error1 0x%x\n", sliport_err1);
2471                 dev_err(&adapter->pdev->dev,
2472                         "ERR: sliport error2 0x%x\n", sliport_err2);
2473         }
2474
2475         if (ue_lo) {
2476                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2477                         if (ue_lo & 1)
2478                                 dev_err(&adapter->pdev->dev,
2479                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2480                 }
2481         }
2482
2483         if (ue_hi) {
2484                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2485                         if (ue_hi & 1)
2486                                 dev_err(&adapter->pdev->dev,
2487                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2488                 }
2489         }
2490
2491 }
2492
2493 static void be_msix_disable(struct be_adapter *adapter)
2494 {
2495         if (msix_enabled(adapter)) {
2496                 pci_disable_msix(adapter->pdev);
2497                 adapter->num_msix_vec = 0;
2498                 adapter->num_msix_roce_vec = 0;
2499         }
2500 }
2501
2502 static int be_msix_enable(struct be_adapter *adapter)
2503 {
2504         int i, status, num_vec;
2505         struct device *dev = &adapter->pdev->dev;
2506
2507         /* If RoCE is supported, program the max number of NIC vectors that
2508          * may be configured via set-channels, along with vectors needed for
2509          * RoCE. Else, just program the number we'll use initially.
2510          */
2511         if (be_roce_supported(adapter))
2512                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2513                                 2 * num_online_cpus());
2514         else
2515                 num_vec = adapter->cfg_num_qs;
2516
2517         for (i = 0; i < num_vec; i++)
2518                 adapter->msix_entries[i].entry = i;
2519
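         /* With the legacy pci_enable_msix() API used here, a positive
          * return value is the number of vectors actually available when the
          * requested count cannot be granted; the request is then retried
          * once with that smaller count, provided it still meets
          * MIN_MSIX_VECTORS.
          */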
2520         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2521         if (status == 0) {
2522                 goto done;
2523         } else if (status >= MIN_MSIX_VECTORS) {
2524                 num_vec = status;
2525                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2526                                          num_vec);
2527                 if (!status)
2528                         goto done;
2529         }
2530
2531         dev_warn(dev, "MSIx enable failed\n");
2532
2533         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2534         if (!be_physfn(adapter))
2535                 return status;
2536         return 0;
2537 done:
2538         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2539                 adapter->num_msix_roce_vec = num_vec / 2;
2540                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2541                          adapter->num_msix_roce_vec);
2542         }
2543
2544         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2545
2546         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2547                  adapter->num_msix_vec);
2548         return 0;
2549 }
2550
2551 static inline int be_msix_vec_get(struct be_adapter *adapter,
2552                                 struct be_eq_obj *eqo)
2553 {
2554         return adapter->msix_entries[eqo->msix_idx].vector;
2555 }
2556
2557 static int be_msix_register(struct be_adapter *adapter)
2558 {
2559         struct net_device *netdev = adapter->netdev;
2560         struct be_eq_obj *eqo;
2561         int status, i, vec;
2562
2563         for_all_evt_queues(adapter, eqo, i) {
2564                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2565                 vec = be_msix_vec_get(adapter, eqo);
2566                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2567                 if (status)
2568                         goto err_msix;
2569         }
2570
2571         return 0;
2572 err_msix:
2573         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2574                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2575         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2576                 status);
2577         be_msix_disable(adapter);
2578         return status;
2579 }
2580
2581 static int be_irq_register(struct be_adapter *adapter)
2582 {
2583         struct net_device *netdev = adapter->netdev;
2584         int status;
2585
2586         if (msix_enabled(adapter)) {
2587                 status = be_msix_register(adapter);
2588                 if (status == 0)
2589                         goto done;
2590                 /* INTx is not supported for VFs */
2591                 if (!be_physfn(adapter))
2592                         return status;
2593         }
2594
2595         /* INTx: only the first EQ is used */
2596         netdev->irq = adapter->pdev->irq;
2597         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2598                              &adapter->eq_obj[0]);
2599         if (status) {
2600                 dev_err(&adapter->pdev->dev,
2601                         "INTx request IRQ failed - err %d\n", status);
2602                 return status;
2603         }
2604 done:
2605         adapter->isr_registered = true;
2606         return 0;
2607 }
2608
2609 static void be_irq_unregister(struct be_adapter *adapter)
2610 {
2611         struct net_device *netdev = adapter->netdev;
2612         struct be_eq_obj *eqo;
2613         int i;
2614
2615         if (!adapter->isr_registered)
2616                 return;
2617
2618         /* INTx */
2619         if (!msix_enabled(adapter)) {
2620                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2621                 goto done;
2622         }
2623
2624         /* MSIx */
2625         for_all_evt_queues(adapter, eqo, i)
2626                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2627
2628 done:
2629         adapter->isr_registered = false;
2630 }
2631
2632 static void be_rx_qs_destroy(struct be_adapter *adapter)
2633 {
2634         struct be_queue_info *q;
2635         struct be_rx_obj *rxo;
2636         int i;
2637
2638         for_all_rx_queues(adapter, rxo, i) {
2639                 q = &rxo->q;
2640                 if (q->created) {
2641                         be_cmd_rxq_destroy(adapter, q);
2642                         be_rx_cq_clean(rxo);
2643                 }
2644                 be_queue_free(adapter, q);
2645         }
2646 }
2647
2648 static int be_close(struct net_device *netdev)
2649 {
2650         struct be_adapter *adapter = netdev_priv(netdev);
2651         struct be_eq_obj *eqo;
2652         int i;
2653
2654         be_roce_dev_close(adapter);
2655
2656         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2657                 for_all_evt_queues(adapter, eqo, i) {
2658                         napi_disable(&eqo->napi);
2659                         be_disable_busy_poll(eqo);
2660                 }
2661                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2662         }
2663
2664         be_async_mcc_disable(adapter);
2665
2666         /* Wait for all pending tx completions to arrive so that
2667          * all tx skbs are freed.
2668          */
2669         netif_tx_disable(netdev);
2670         be_tx_compl_clean(adapter);
2671
2672         be_rx_qs_destroy(adapter);
2673
2674         for (i = 1; i < (adapter->uc_macs + 1); i++)
2675                 be_cmd_pmac_del(adapter, adapter->if_handle,
2676                                 adapter->pmac_id[i], 0);
2677         adapter->uc_macs = 0;
2678
2679         for_all_evt_queues(adapter, eqo, i) {
2680                 if (msix_enabled(adapter))
2681                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2682                 else
2683                         synchronize_irq(netdev->irq);
2684                 be_eq_clean(eqo);
2685         }
2686
2687         be_irq_unregister(adapter);
2688
2689         return 0;
2690 }
2691
2692 static int be_rx_qs_create(struct be_adapter *adapter)
2693 {
2694         struct be_rx_obj *rxo;
2695         int rc, i, j;
2696         u8 rsstable[128];
2697
2698         for_all_rx_queues(adapter, rxo, i) {
2699                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2700                                     sizeof(struct be_eth_rx_d));
2701                 if (rc)
2702                         return rc;
2703         }
2704
2705         /* The FW would like the default RXQ to be created first */
2706         rxo = default_rxo(adapter);
2707         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2708                                adapter->if_handle, false, &rxo->rss_id);
2709         if (rc)
2710                 return rc;
2711
2712         for_all_rss_queues(adapter, rxo, i) {
2713                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2714                                        rx_frag_size, adapter->if_handle,
2715                                        true, &rxo->rss_id);
2716                 if (rc)
2717                         return rc;
2718         }
2719
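         /* The 128-entry RSS indirection table below is filled round-robin
          * with the RSS ring ids. E.g. with 3 RSS rings (ids a, b, c) the
          * table becomes a, b, c, a, b, c, ... so flows hash evenly across
          * the rings.
          */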
2720         if (be_multi_rxq(adapter)) {
2721                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2722                         for_all_rss_queues(adapter, rxo, i) {
2723                                 if ((j + i) >= 128)
2724                                         break;
2725                                 rsstable[j + i] = rxo->rss_id;
2726                         }
2727                 }
2728                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2729                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2730
2731                 if (!BEx_chip(adapter))
2732                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2733                                                 RSS_ENABLE_UDP_IPV6;
2734         } else {
2735                 /* Disable RSS if only the default RXQ is created */
2736                 adapter->rss_flags = RSS_ENABLE_NONE;
2737         }
2738
2739         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2740                                128);
2741         if (rc) {
2742                 adapter->rss_flags = RSS_ENABLE_NONE;
2743                 return rc;
2744         }
2745
2746         /* First time posting */
2747         for_all_rx_queues(adapter, rxo, i)
2748                 be_post_rx_frags(rxo, GFP_KERNEL);
2749         return 0;
2750 }
2751
2752 static int be_open(struct net_device *netdev)
2753 {
2754         struct be_adapter *adapter = netdev_priv(netdev);
2755         struct be_eq_obj *eqo;
2756         struct be_rx_obj *rxo;
2757         struct be_tx_obj *txo;
2758         u8 link_status;
2759         int status, i;
2760
2761         status = be_rx_qs_create(adapter);
2762         if (status)
2763                 goto err;
2764
2765         status = be_irq_register(adapter);
2766         if (status)
2767                 goto err;
2768
2769         for_all_rx_queues(adapter, rxo, i)
2770                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2771
2772         for_all_tx_queues(adapter, txo, i)
2773                 be_cq_notify(adapter, txo->cq.id, true, 0);
2774
2775         be_async_mcc_enable(adapter);
2776
2777         for_all_evt_queues(adapter, eqo, i) {
2778                 napi_enable(&eqo->napi);
2779                 be_enable_busy_poll(eqo);
2780                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2781         }
2782         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2783
2784         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2785         if (!status)
2786                 be_link_status_update(adapter, link_status);
2787
2788         netif_tx_start_all_queues(netdev);
2789         be_roce_dev_open(adapter);
2790         return 0;
2791 err:
2792         be_close(adapter->netdev);
2793         return -EIO;
2794 }
2795
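/*
 * Summary comment added for clarity: enabling WoL programs the interface MAC
 * into the FW's ACPI magic-packet filter and arms PCI wake for D3hot/D3cold;
 * disabling passes a zeroed MAC and disarms PCI wake.  The DMA buffer below
 * only carries the FW command payload.
 */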
2796 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2797 {
2798         struct be_dma_mem cmd;
2799         int status = 0;
2800         u8 mac[ETH_ALEN];
2801
2802         memset(mac, 0, ETH_ALEN);
2803
2804         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2805         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2806                                      GFP_KERNEL);
2807         if (!cmd.va)
2808                 return -ENOMEM;
2809
2810         if (enable) {
2811                 status = pci_write_config_dword(adapter->pdev,
2812                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2813                 if (status) {
2814                         dev_err(&adapter->pdev->dev,
2815                                 "Could not enable Wake-on-LAN\n");
2816                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2817                                           cmd.dma);
2818                         return status;
2819                 }
2820                 status = be_cmd_enable_magic_wol(adapter,
2821                                 adapter->netdev->dev_addr, &cmd);
2822                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2823                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2824         } else {
2825                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2826                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2827                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2828         }
2829
2830         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2831         return status;
2832 }
2833
2834 /*
2835  * Generate a seed MAC address from the PF MAC address using jhash.
2836  * MAC addresses for the VFs are assigned incrementally, starting from the
2837  * seed.  These addresses are programmed into the ASIC by the PF; each VF
2838  * driver queries for its MAC address during probe.
2839  */
2840 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2841 {
2842         u32 vf;
2843         int status = 0;
2844         u8 mac[ETH_ALEN];
2845         struct be_vf_cfg *vf_cfg;
2846
2847         be_vf_eth_addr_generate(adapter, mac);
2848
2849         for_all_vfs(adapter, vf_cfg, vf) {
2850                 if (BEx_chip(adapter))
2851                         status = be_cmd_pmac_add(adapter, mac,
2852                                                  vf_cfg->if_handle,
2853                                                  &vf_cfg->pmac_id, vf + 1);
2854                 else
2855                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2856                                                 vf + 1);
2857
2858                 if (status)
2859                         dev_err(&adapter->pdev->dev,
2860                                 "MAC address assignment failed for VF %d\n", vf);
2861                 else
2862                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2863
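                /* Bump the last octet to derive the next VF's address, e.g.
                 * a seed ending in :50 yields :51, :52, ... for the
                 * following VFs (illustrative values; low-octet wrap-around
                 * is not handled here).
                 */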
2864                 mac[5] += 1;
2865         }
2866         return status;
2867 }
2868
2869 static int be_vfs_mac_query(struct be_adapter *adapter)
2870 {
2871         int status, vf;
2872         u8 mac[ETH_ALEN];
2873         struct be_vf_cfg *vf_cfg;
2874
2875         for_all_vfs(adapter, vf_cfg, vf) {
2876                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2877                                                mac, vf_cfg->if_handle,
2878                                                false, vf + 1);
2879                 if (status)
2880                         return status;
2881                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2882         }
2883         return 0;
2884 }
2885
2886 static void be_vf_clear(struct be_adapter *adapter)
2887 {
2888         struct be_vf_cfg *vf_cfg;
2889         u32 vf;
2890
2891         if (pci_vfs_assigned(adapter->pdev)) {
2892                 dev_warn(&adapter->pdev->dev,
2893                          "VFs are assigned to VMs: not disabling VFs\n");
2894                 goto done;
2895         }
2896
2897         pci_disable_sriov(adapter->pdev);
2898
2899         for_all_vfs(adapter, vf_cfg, vf) {
2900                 if (BEx_chip(adapter))
2901                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2902                                         vf_cfg->pmac_id, vf + 1);
2903                 else
2904                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2905                                        vf + 1);
2906
2907                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2908         }
2909 done:
2910         kfree(adapter->vf_cfg);
2911         adapter->num_vfs = 0;
2912 }
2913
2914 static void be_clear_queues(struct be_adapter *adapter)
2915 {
2916         be_mcc_queues_destroy(adapter);
2917         be_rx_cqs_destroy(adapter);
2918         be_tx_queues_destroy(adapter);
2919         be_evt_queues_destroy(adapter);
2920 }
2921
2922 static void be_cancel_worker(struct be_adapter *adapter)
2923 {
2924         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2925                 cancel_delayed_work_sync(&adapter->work);
2926                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2927         }
2928 }
2929
2930 static void be_mac_clear(struct be_adapter *adapter)
2931 {
2932         int i;
2933
2934         if (adapter->pmac_id) {
2935                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2936                         be_cmd_pmac_del(adapter, adapter->if_handle,
2937                                         adapter->pmac_id[i], 0);
2938                 adapter->uc_macs = 0;
2939
2940                 kfree(adapter->pmac_id);
2941                 adapter->pmac_id = NULL;
2942         }
2943 }
2944
2945 static int be_clear(struct be_adapter *adapter)
2946 {
2947         be_cancel_worker(adapter);
2948
2949         if (sriov_enabled(adapter))
2950                 be_vf_clear(adapter);
2951
2952         /* delete the primary mac along with the uc-mac list */
2953         be_mac_clear(adapter);
2954
2955         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2956
2957         be_clear_queues(adapter);
2958
2959         be_msix_disable(adapter);
2960         return 0;
2961 }
2962
2963 static int be_vfs_if_create(struct be_adapter *adapter)
2964 {
2965         struct be_resources res = {0};
2966         struct be_vf_cfg *vf_cfg;
2967         u32 cap_flags, en_flags, vf;
2968         int status = 0;
2969
2970         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2971                     BE_IF_FLAGS_MULTICAST;
2972
2973         for_all_vfs(adapter, vf_cfg, vf) {
2974                 if (!BE3_chip(adapter)) {
2975                         status = be_cmd_get_profile_config(adapter, &res,
2976                                                            vf + 1);
2977                         if (!status)
2978                                 cap_flags = res.if_cap_flags;
2979                 }
2980
2981                 /* If a FW profile exists, then cap_flags are updated */
2982                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2983                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2984                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2985                                           &vf_cfg->if_handle, vf + 1);
2986                 if (status)
2987                         goto err;
2988         }
2989 err:
2990         return status;
2991 }
2992
2993 static int be_vf_setup_init(struct be_adapter *adapter)
2994 {
2995         struct be_vf_cfg *vf_cfg;
2996         int vf;
2997
2998         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2999                                   GFP_KERNEL);
3000         if (!adapter->vf_cfg)
3001                 return -ENOMEM;
3002
3003         for_all_vfs(adapter, vf_cfg, vf) {
3004                 vf_cfg->if_handle = -1;
3005                 vf_cfg->pmac_id = -1;
3006         }
3007         return 0;
3008 }
3009
3010 static int be_vf_setup(struct be_adapter *adapter)
3011 {
3012         struct device *dev = &adapter->pdev->dev;
3013         struct be_vf_cfg *vf_cfg;
3014         int status, old_vfs, vf;
3015         u32 privileges;
3016         u16 lnk_speed;
3017
3018         old_vfs = pci_num_vf(adapter->pdev);
3019         if (old_vfs) {
3020                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3021                 if (old_vfs != num_vfs)
3022                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3023                 adapter->num_vfs = old_vfs;
3024         } else {
3025                 if (num_vfs > be_max_vfs(adapter))
3026                         dev_info(dev, "Device supports %d VFs and not %d\n",
3027                                  be_max_vfs(adapter), num_vfs);
3028                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3029                 if (!adapter->num_vfs)
3030                         return 0;
3031         }
3032
3033         status = be_vf_setup_init(adapter);
3034         if (status)
3035                 goto err;
3036
3037         if (old_vfs) {
3038                 for_all_vfs(adapter, vf_cfg, vf) {
3039                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3040                         if (status)
3041                                 goto err;
3042                 }
3043         } else {
3044                 status = be_vfs_if_create(adapter);
3045                 if (status)
3046                         goto err;
3047         }
3048
3049         if (old_vfs) {
3050                 status = be_vfs_mac_query(adapter);
3051                 if (status)
3052                         goto err;
3053         } else {
3054                 status = be_vf_eth_addr_config(adapter);
3055                 if (status)
3056                         goto err;
3057         }
3058
3059         for_all_vfs(adapter, vf_cfg, vf) {
3060                 /* Allow VFs to program MAC/VLAN filters */
3061                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3062                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3063                         status = be_cmd_set_fn_privileges(adapter,
3064                                                           privileges |
3065                                                           BE_PRIV_FILTMGMT,
3066                                                           vf + 1);
3067                         if (!status)
3068                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3069                                          vf);
3070                 }
3071
3072                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
3073                  * Allow the full available bandwidth.
3074                  */
3075                 if (BE3_chip(adapter) && !old_vfs)
3076                         be_cmd_set_qos(adapter, 1000, vf + 1);
3077
3078                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3079                                                   NULL, vf + 1);
3080                 if (!status)
3081                         vf_cfg->tx_rate = lnk_speed;
3082
3083                 if (!old_vfs)
3084                         be_cmd_enable_vf(adapter, vf + 1);
3085         }
3086
3087         if (!old_vfs) {
3088                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3089                 if (status) {
3090                         dev_err(dev, "SRIOV enable failed\n");
3091                         adapter->num_vfs = 0;
3092                         goto err;
3093                 }
3094         }
3095         return 0;
3096 err:
3097         dev_err(dev, "VF setup failed\n");
3098         be_vf_clear(adapter);
3099         return status;
3100 }
3101
3102 /* On BE2/BE3, the FW does not advertise the supported resource limits */
3103 static void BEx_get_resources(struct be_adapter *adapter,
3104                               struct be_resources *res)
3105 {
3106         struct pci_dev *pdev = adapter->pdev;
3107         bool use_sriov = false;
3108         int max_vfs;
3109
3110         max_vfs = pci_sriov_get_totalvfs(pdev);
3111
3112         if (BE3_chip(adapter) && sriov_want(adapter)) {
3113                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3114                 use_sriov = res->max_vfs;
3115         }
3116
3117         if (be_physfn(adapter))
3118                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3119         else
3120                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3121
3122         if (adapter->function_mode & FLEX10_MODE)
3123                 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3124         else if (adapter->function_mode & UMC_ENABLED)
3125                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3126         else
3127                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3128         res->max_mcast_mac = BE_MAX_MC;
3129
3130         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3131         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3132             !be_physfn(adapter) || (adapter->port_num > 1))
3133                 res->max_tx_qs = 1;
3134         else
3135                 res->max_tx_qs = BE3_MAX_TX_QS;
3136
3137         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3138             !use_sriov && be_physfn(adapter))
3139                 res->max_rss_qs = (adapter->be3_native) ?
3140                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3141         res->max_rx_qs = res->max_rss_qs + 1;
3142
3143         if (be_physfn(adapter))
3144                 res->max_evt_qs = (max_vfs > 0) ?
3145                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3146         else
3147                 res->max_evt_qs = 1;
3148
3149         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3150         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3151                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3152 }
3153
3154 static void be_setup_init(struct be_adapter *adapter)
3155 {
3156         adapter->vlan_prio_bmap = 0xff;
3157         adapter->phy.link_speed = -1;
3158         adapter->if_handle = -1;
3159         adapter->be3_native = false;
3160         adapter->promiscuous = false;
3161         if (be_physfn(adapter))
3162                 adapter->cmd_privileges = MAX_PRIVILEGES;
3163         else
3164                 adapter->cmd_privileges = MIN_PRIVILEGES;
3165 }
3166
3167 static int be_get_resources(struct be_adapter *adapter)
3168 {
3169         struct device *dev = &adapter->pdev->dev;
3170         struct be_resources res = {0};
3171         int status;
3172
3173         if (BEx_chip(adapter)) {
3174                 BEx_get_resources(adapter, &res);
3175                 adapter->res = res;
3176         }
3177
3178         /* For Lancer, SH, etc. read per-function resource limits from the FW.
3179          * GET_FUNC_CONFIG returns per-function guaranteed limits, while
3180          * GET_PROFILE_CONFIG returns the PCI-E related PF-pool limits.
3181          */
3182         if (!BEx_chip(adapter)) {
3183                 status = be_cmd_get_func_config(adapter, &res);
3184                 if (status)
3185                         return status;
3186
3187                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3188                 if (be_roce_supported(adapter))
3189                         res.max_evt_qs /= 2;
3190                 adapter->res = res;
3191
3192                 if (be_physfn(adapter)) {
3193                         status = be_cmd_get_profile_config(adapter, &res, 0);
3194                         if (status)
3195                                 return status;
3196                         adapter->res.max_vfs = res.max_vfs;
3197                 }
3198
3199                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3200                          be_max_txqs(adapter), be_max_rxqs(adapter),
3201                          be_max_rss(adapter), be_max_eqs(adapter),
3202                          be_max_vfs(adapter));
3203                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3204                          be_max_uc(adapter), be_max_mc(adapter),
3205                          be_max_vlans(adapter));
3206         }
3207
3208         return 0;
3209 }
3210
3211 /* Routine to query per function resource limits */
3212 static int be_get_config(struct be_adapter *adapter)
3213 {
3214         u16 profile_id;
3215         int status;
3216
3217         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3218                                      &adapter->function_mode,
3219                                      &adapter->function_caps,
3220                                      &adapter->asic_rev);
3221         if (status)
3222                 return status;
3223
3224         if (be_physfn(adapter)) {
3225                 status = be_cmd_get_active_profile(adapter, &profile_id);
3226                 if (!status)
3227                         dev_info(&adapter->pdev->dev,
3228                                  "Using profile 0x%x\n", profile_id);
3229         }
3230
3231         status = be_get_resources(adapter);
3232         if (status)
3233                 return status;
3234
3235         /* The primary MAC needs one pmac entry */
3236         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3237                                    GFP_KERNEL);
3238         if (!adapter->pmac_id)
3239                 return -ENOMEM;
3240
3241         /* Sanitize cfg_num_qs based on HW and platform limits */
3242         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3243
3244         return 0;
3245 }
3246
3247 static int be_mac_setup(struct be_adapter *adapter)
3248 {
3249         u8 mac[ETH_ALEN];
3250         int status;
3251
3252         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3253                 status = be_cmd_get_perm_mac(adapter, mac);
3254                 if (status)
3255                         return status;
3256
3257                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3258                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3259         } else {
3260                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3261                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3262         }
3263
3264         /* For BE3-R VFs, the PF programs the initial MAC address */
3265         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3266                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3267                                 &adapter->pmac_id[0], 0);
3268         return 0;
3269 }
3270
3271 static void be_schedule_worker(struct be_adapter *adapter)
3272 {
3273         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3274         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3275 }
3276
3277 static int be_setup_queues(struct be_adapter *adapter)
3278 {
3279         struct net_device *netdev = adapter->netdev;
3280         int status;
3281
3282         status = be_evt_queues_create(adapter);
3283         if (status)
3284                 goto err;
3285
3286         status = be_tx_qs_create(adapter);
3287         if (status)
3288                 goto err;
3289
3290         status = be_rx_cqs_create(adapter);
3291         if (status)
3292                 goto err;
3293
3294         status = be_mcc_queues_create(adapter);
3295         if (status)
3296                 goto err;
3297
3298         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3299         if (status)
3300                 goto err;
3301
3302         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3303         if (status)
3304                 goto err;
3305
3306         return 0;
3307 err:
3308         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3309         return status;
3310 }
3311
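/*
 * Summary comment added for clarity: re-create all queues with the current
 * settings by quiescing the interface, tearing the queues down, re-programming
 * the MSI-X vector set when RoCE isn't sharing it, and bringing everything
 * back up.
 */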
3312 int be_update_queues(struct be_adapter *adapter)
3313 {
3314         struct net_device *netdev = adapter->netdev;
3315         int status;
3316
3317         if (netif_running(netdev))
3318                 be_close(netdev);
3319
3320         be_cancel_worker(adapter);
3321
3322         /* If any vectors have been shared with RoCE, we cannot re-program
3323          * the MSIx table.
3324          */
3325         if (!adapter->num_msix_roce_vec)
3326                 be_msix_disable(adapter);
3327
3328         be_clear_queues(adapter);
3329
3330         if (!msix_enabled(adapter)) {
3331                 status = be_msix_enable(adapter);
3332                 if (status)
3333                         return status;
3334         }
3335
3336         status = be_setup_queues(adapter);
3337         if (status)
3338                 return status;
3339
3340         be_schedule_worker(adapter);
3341
3342         if (netif_running(netdev))
3343                 status = be_open(netdev);
3344
3345         return status;
3346 }
3347
3348 static int be_setup(struct be_adapter *adapter)
3349 {
3350         struct device *dev = &adapter->pdev->dev;
3351         u32 tx_fc, rx_fc, en_flags;
3352         int status;
3353
3354         be_setup_init(adapter);
3355
3356         if (!lancer_chip(adapter))
3357                 be_cmd_req_native_mode(adapter);
3358
3359         status = be_get_config(adapter);
3360         if (status)
3361                 goto err;
3362
3363         status = be_msix_enable(adapter);
3364         if (status)
3365                 goto err;
3366
3367         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3368                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3369         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3370                 en_flags |= BE_IF_FLAGS_RSS;
3371         en_flags = en_flags & be_if_cap_flags(adapter);
3372         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3373                                   &adapter->if_handle, 0);
3374         if (status)
3375                 goto err;
3376
3377         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3378         rtnl_lock();
3379         status = be_setup_queues(adapter);
3380         rtnl_unlock();
3381         if (status)
3382                 goto err;
3383
3384         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3385
3386         status = be_mac_setup(adapter);
3387         if (status)
3388                 goto err;
3389
3390         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3391
3392         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3393                 dev_err(dev, "Firmware on card is old (%s); IRQs may not work.\n",
3394                         adapter->fw_ver);
3395                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3396         }
3397
3398         if (adapter->vlans_added)
3399                 be_vid_config(adapter);
3400
3401         be_set_rx_mode(adapter->netdev);
3402
3403         be_cmd_get_acpi_wol_cap(adapter);
3404
3405         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3406
3407         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3408                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3409                                         adapter->rx_fc);
3410
3411         if (sriov_want(adapter)) {
3412                 if (be_max_vfs(adapter))
3413                         be_vf_setup(adapter);
3414                 else
3415                         dev_warn(dev, "device doesn't support SRIOV\n");
3416         }
3417
3418         status = be_cmd_get_phy_info(adapter);
3419         if (!status && be_pause_supported(adapter))
3420                 adapter->phy.fc_autoneg = 1;
3421
3422         be_schedule_worker(adapter);
3423         return 0;
3424 err:
3425         be_clear(adapter);
3426         return status;
3427 }
3428
3429 #ifdef CONFIG_NET_POLL_CONTROLLER
3430 static void be_netpoll(struct net_device *netdev)
3431 {
3432         struct be_adapter *adapter = netdev_priv(netdev);
3433         struct be_eq_obj *eqo;
3434         int i;
3435
3436         for_all_evt_queues(adapter, eqo, i) {
3437                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3438                 napi_schedule(&eqo->napi);
3439         }
3442 }
3443 #endif
3444
3445 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3446 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3447
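/*
 * Summary comment added for clarity: decide whether the boot-code (redboot)
 * region needs flashing by comparing the CRC held in the last 4 bytes of the
 * image within the UFI file against the CRC currently on flash; the write is
 * skipped when they match.
 */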
3448 static bool be_flash_redboot(struct be_adapter *adapter,
3449                         const u8 *p, u32 img_start, int image_size,
3450                         int hdr_size)
3451 {
3452         u32 crc_offset;
3453         u8 flashed_crc[4];
3454         int status;
3455
3456         crc_offset = hdr_size + img_start + image_size - 4;
3457
3458         p += crc_offset;
3459
3460         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3461                         (image_size - 4));
3462         if (status) {
3463                 dev_err(&adapter->pdev->dev,
3464                         "could not get CRC from flash, not flashing redboot\n");
3465                 return false;
3466         }
3467
3468         /* Update redboot only if the CRC does not match */
3469         return memcmp(flashed_crc, p, 4) != 0;
3473 }
3474
3475 static bool phy_flashing_required(struct be_adapter *adapter)
3476 {
3477         return (adapter->phy.phy_type == TN_8022 &&
3478                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3479 }
3480
3481 static bool is_comp_in_ufi(struct be_adapter *adapter,
3482                            struct flash_section_info *fsec, int type)
3483 {
3484         int i = 0, img_type = 0;
3485         struct flash_section_info_g2 *fsec_g2 = NULL;
3486
3487         if (BE2_chip(adapter))
3488                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3489
3490         for (i = 0; i < MAX_FLASH_COMP; i++) {
3491                 if (fsec_g2)
3492                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3493                 else
3494                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3495
3496                 if (img_type == type)
3497                         return true;
3498         }
3499         return false;
3501 }
3502
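/*
 * Summary comment added for clarity: scan the UFI image just past the file
 * and image headers, at 32-byte steps, for the flash-section cookie; the
 * flash_section_info that begins at the cookie describes every flashable
 * component in the file.
 */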
3503 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3504                                          int header_size,
3505                                          const struct firmware *fw)
3506 {
3507         struct flash_section_info *fsec = NULL;
3508         const u8 *p = fw->data;
3509
3510         p += header_size;
3511         while (p < (fw->data + fw->size)) {
3512                 fsec = (struct flash_section_info *)p;
3513                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3514                         return fsec;
3515                 p += 32;
3516         }
3517         return NULL;
3518 }
3519
3520 static int be_flash(struct be_adapter *adapter, const u8 *img,
3521                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3522 {
3523         u32 total_bytes = 0, flash_op, num_bytes = 0;
3524         int status = 0;
3525         struct be_cmd_write_flashrom *req = flash_cmd->va;
3526
3527         total_bytes = img_size;
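        /*
         * Write the image in chunks of up to 32KB.  Every chunk except the
         * last is sent with a SAVE opcode (the FW buffers it); the final
         * chunk uses a FLASH opcode, which commits the buffered image to
         * flash.  (Summary comment added for clarity.)
         */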
3528         while (total_bytes) {
3529                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3530
3531                 total_bytes -= num_bytes;
3532
3533                 if (!total_bytes) {
3534                         if (optype == OPTYPE_PHY_FW)
3535                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3536                         else
3537                                 flash_op = FLASHROM_OPER_FLASH;
3538                 } else {
3539                         if (optype == OPTYPE_PHY_FW)
3540                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3541                         else
3542                                 flash_op = FLASHROM_OPER_SAVE;
3543                 }
3544
3545                 memcpy(req->data_buf, img, num_bytes);
3546                 img += num_bytes;
3547                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3548                                                 flash_op, num_bytes);
3549                 if (status) {
3550                         if (status == ILLEGAL_IOCTL_REQ &&
3551                             optype == OPTYPE_PHY_FW)
3552                                 break;
3553                         dev_err(&adapter->pdev->dev,
3554                                 "cmd to write to flash rom failed.\n");
3555                         return status;
3556                 }
3557         }
3558         return 0;
3559 }
3560
3561 /* For BE2, BE3 and BE3-R */
3562 static int be_flash_BEx(struct be_adapter *adapter,
3563                          const struct firmware *fw,
3564                          struct be_dma_mem *flash_cmd,
3565                          int num_of_images)
3567 {
3568         int status = 0, i, filehdr_size = 0;
3569         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3570         const u8 *p = fw->data;
3571         const struct flash_comp *pflashcomp;
3572         int num_comp, redboot;
3573         struct flash_section_info *fsec = NULL;
3574
3575         struct flash_comp gen3_flash_types[] = {
3576                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3577                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3578                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3579                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3580                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3581                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3582                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3583                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3584                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3585                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3586                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3587                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3588                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3589                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3590                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3591                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3592                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3593                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3594                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3595                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3596         };
3597
3598         struct flash_comp gen2_flash_types[] = {
3599                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3600                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3601                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3602                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3603                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3604                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3605                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3606                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3607                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3608                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3609                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3610                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3611                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3612                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3613                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3614                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3615         };
3616
3617         if (BE3_chip(adapter)) {
3618                 pflashcomp = gen3_flash_types;
3619                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3620                 num_comp = ARRAY_SIZE(gen3_flash_types);
3621         } else {
3622                 pflashcomp = gen2_flash_types;
3623                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3624                 num_comp = ARRAY_SIZE(gen2_flash_types);
3625         }
3626
3627         /* Get flash section info*/
3628         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3629         if (!fsec) {
3630                 dev_err(&adapter->pdev->dev,
3631                         "Invalid cookie. UFI corrupted?\n");
3632                 return -1;
3633         }
3634         for (i = 0; i < num_comp; i++) {
3635                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3636                         continue;
3637
3638                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3639                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3640                         continue;
3641
3642                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3643                     !phy_flashing_required(adapter))
3644                         continue;
3645
3646                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3647                         redboot = be_flash_redboot(adapter, fw->data,
3648                                 pflashcomp[i].offset, pflashcomp[i].size,
3649                                 filehdr_size + img_hdrs_size);
3650                         if (!redboot)
3651                                 continue;
3652                 }
3653
3654                 p = fw->data;
3655                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3656                 if (p + pflashcomp[i].size > fw->data + fw->size)
3657                         return -1;
3658
3659                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3660                                         pflashcomp[i].size);
3661                 if (status) {
3662                         dev_err(&adapter->pdev->dev,
3663                                 "Flashing section type %d failed.\n",
3664                                 pflashcomp[i].img_type);
3665                         return status;
3666                 }
3667         }
3668         return 0;
3669 }
3670
3671 static int be_flash_skyhawk(struct be_adapter *adapter,
3672                 const struct firmware *fw,
3673                 struct be_dma_mem *flash_cmd, int num_of_images)
3674 {
3675         int status = 0, i, filehdr_size = 0;
3676         int img_offset, img_size, img_optype, redboot;
3677         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3678         const u8 *p = fw->data;
3679         struct flash_section_info *fsec = NULL;
3680
3681         filehdr_size = sizeof(struct flash_file_hdr_g3);
3682         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3683         if (!fsec) {
3684                 dev_err(&adapter->pdev->dev,
3685                         "Invalid cookie. UFI corrupted?\n");
3686                 return -1;
3687         }
3688
3689         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3690                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3691                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3692
3693                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3694                 case IMAGE_FIRMWARE_iSCSI:
3695                         img_optype = OPTYPE_ISCSI_ACTIVE;
3696                         break;
3697                 case IMAGE_BOOT_CODE:
3698                         img_optype = OPTYPE_REDBOOT;
3699                         break;
3700                 case IMAGE_OPTION_ROM_ISCSI:
3701                         img_optype = OPTYPE_BIOS;
3702                         break;
3703                 case IMAGE_OPTION_ROM_PXE:
3704                         img_optype = OPTYPE_PXE_BIOS;
3705                         break;
3706                 case IMAGE_OPTION_ROM_FCoE:
3707                         img_optype = OPTYPE_FCOE_BIOS;
3708                         break;
3709                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3710                         img_optype = OPTYPE_ISCSI_BACKUP;
3711                         break;
3712                 case IMAGE_NCSI:
3713                         img_optype = OPTYPE_NCSI_FW;
3714                         break;
3715                 default:
3716                         continue;
3717                 }
3718
3719                 if (img_optype == OPTYPE_REDBOOT) {
3720                         redboot = be_flash_redboot(adapter, fw->data,
3721                                         img_offset, img_size,
3722                                         filehdr_size + img_hdrs_size);
3723                         if (!redboot)
3724                                 continue;
3725                 }
3726
3727                 p = fw->data;
3728                 p += filehdr_size + img_offset + img_hdrs_size;
3729                 if (p + img_size > fw->data + fw->size)
3730                         return -1;
3731
3732                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3733                 if (status) {
3734                         dev_err(&adapter->pdev->dev,
3735                                 "Flashing section type %d failed.\n",
3736                                 le32_to_cpu(fsec->fsec_entry[i].type));
3737                         return status;
3738                 }
3739         }
3740         return 0;
3741 }
3742
3743 static int lancer_fw_download(struct be_adapter *adapter,
3744                                 const struct firmware *fw)
3745 {
3746 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3747 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3748         struct be_dma_mem flash_cmd;
3749         const u8 *data_ptr = NULL;
3750         u8 *dest_image_ptr = NULL;
3751         size_t image_size = 0;
3752         u32 chunk_size = 0;
3753         u32 data_written = 0;
3754         u32 offset = 0;
3755         int status = 0;
3756         u8 add_status = 0;
3757         u8 change_status;
3758
3759         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3760                 dev_err(&adapter->pdev->dev,
3761                         "FW image not properly aligned; length must be a multiple of 4 bytes\n");
3763                 status = -EINVAL;
3764                 goto lancer_fw_exit;
3765         }
3766
3767         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3768                                 + LANCER_FW_DOWNLOAD_CHUNK;
3769         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3770                                           &flash_cmd.dma, GFP_KERNEL);
3771         if (!flash_cmd.va) {
3772                 status = -ENOMEM;
3773                 goto lancer_fw_exit;
3774         }
3775
3776         dest_image_ptr = flash_cmd.va +
3777                                 sizeof(struct lancer_cmd_req_write_object);
3778         image_size = fw->size;
3779         data_ptr = fw->data;
3780
3781         while (image_size) {
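        /*
         * Push the image to LANCER_FW_DOWNLOAD_LOCATION in chunks of up to
         * 32KB; the FW reports how much it consumed via data_written.  A
         * final zero-length write (below) commits the object.  (Summary
         * comment added for clarity.)
         */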
3782                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3783
3784                 /* Copy the image chunk content. */
3785                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3786
3787                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3788                                                  chunk_size, offset,
3789                                                  LANCER_FW_DOWNLOAD_LOCATION,
3790                                                  &data_written, &change_status,
3791                                                  &add_status);
3792                 if (status)
3793                         break;
3794
3795                 offset += data_written;
3796                 data_ptr += data_written;
3797                 image_size -= data_written;
3798         }
3799
3800         if (!status) {
3801                 /* Commit the FW written */
3802                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3803                                                  0, offset,
3804                                                  LANCER_FW_DOWNLOAD_LOCATION,
3805                                                  &data_written, &change_status,
3806                                                  &add_status);
3807         }
3808
3809         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3810                                 flash_cmd.dma);
3811         if (status) {
3812                 dev_err(&adapter->pdev->dev,
3813                         "Firmware load error. Status code: 0x%x, additional status: 0x%x\n",
3815                         status, add_status);
3816                 goto lancer_fw_exit;
3817         }
3818
3819         if (change_status == LANCER_FW_RESET_NEEDED) {
3820                 dev_info(&adapter->pdev->dev,
3821                          "Resetting adapter to activate new FW\n");
3822                 status = lancer_physdev_ctrl(adapter,
3823                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3824                 if (status) {
3825                         dev_err(&adapter->pdev->dev,
3826                                 "Adapter busy for FW reset.\n"
3827                                 "New FW will not be active.\n");
3828                         goto lancer_fw_exit;
3829                 }
3830         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3831                 dev_err(&adapter->pdev->dev,
3832                         "System reboot required for new FW to be active\n");
3834         }
3835
3836         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3837 lancer_fw_exit:
3838         return status;
3839 }
3840
3841 #define UFI_TYPE2               2
3842 #define UFI_TYPE3               3
3843 #define UFI_TYPE3R              10
3844 #define UFI_TYPE4               4
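/*
 * Summary comment added for clarity: the first character of the UFI build
 * string identifies the FW generation ('2' = BE2, '3' = BE3, '4' = Skyhawk),
 * and for '3' images an asic_type_rev of 0x10 marks a BE3-R (UFI_TYPE3R)
 * image.  Anything else is rejected as incompatible.
 */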
3845 static int be_get_ufi_type(struct be_adapter *adapter,
3846                            struct flash_file_hdr_g3 *fhdr)
3847 {
3848         if (fhdr == NULL)
3849                 goto be_get_ufi_exit;
3850
3851         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3852                 return UFI_TYPE4;
3853         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3854                 if (fhdr->asic_type_rev == 0x10)
3855                         return UFI_TYPE3R;
3856                 else
3857                         return UFI_TYPE3;
3858         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3859                 return UFI_TYPE2;
3860
3861 be_get_ufi_exit:
3862         dev_err(&adapter->pdev->dev,
3863                 "UFI and Interface are not compatible for flashing\n");
3864         return -1;
3865 }
3866
3867 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3868 {
3869         struct flash_file_hdr_g3 *fhdr3;
3870         struct image_hdr *img_hdr_ptr = NULL;
3871         struct be_dma_mem flash_cmd;
3872         const u8 *p;
3873         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3874
3875         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3876         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3877                                           &flash_cmd.dma, GFP_KERNEL);
3878         if (!flash_cmd.va) {
3879                 status = -ENOMEM;
3880                 goto be_fw_exit;
3881         }
3882
3883         p = fw->data;
3884         fhdr3 = (struct flash_file_hdr_g3 *)p;
3885
3886         ufi_type = be_get_ufi_type(adapter, fhdr3);
3887
3888         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3889         for (i = 0; i < num_imgs; i++) {
3890                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3891                                 (sizeof(struct flash_file_hdr_g3) +
3892                                  i * sizeof(struct image_hdr)));
3893                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3894                         switch (ufi_type) {
3895                         case UFI_TYPE4:
3896                                 status = be_flash_skyhawk(adapter, fw,
3897                                                         &flash_cmd, num_imgs);
3898                                 break;
3899                         case UFI_TYPE3R:
3900                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3901                                                       num_imgs);
3902                                 break;
3903                         case UFI_TYPE3:
3904                                 /* Do not flash this UFI on BE3-R cards */
3905                                 if (adapter->asic_rev < 0x10) {
3906                                         status = be_flash_BEx(adapter, fw,
3907                                                               &flash_cmd,
3908                                                               num_imgs);
3909                                 } else {
3910                                         status = -1;
3911                                         dev_err(&adapter->pdev->dev,
3912                                                 "Can't load BE3 UFI on BE3R\n");
3913                                 }
3914                         }
3915                 }
3916         }
3917
3918         if (ufi_type == UFI_TYPE2)
3919                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3920         else if (ufi_type == -1)
3921                 status = -1;
3922
3923         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3924                           flash_cmd.dma);
3925         if (status) {
3926                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3927                 goto be_fw_exit;
3928         }
3929
3930         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3931
3932 be_fw_exit:
3933         return status;
3934 }
3935
3936 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3937 {
3938         const struct firmware *fw;
3939         int status;
3940
3941         if (!netif_running(adapter->netdev)) {
3942                 dev_err(&adapter->pdev->dev,
3943                         "Firmware load not allowed (interface is down)\n");
3944                 return -1;
3945         }
3946
3947         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3948         if (status)
3949                 goto fw_exit;
3950
3951         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3952
3953         if (lancer_chip(adapter))
3954                 status = lancer_fw_download(adapter, fw);
3955         else
3956                 status = be_fw_download(adapter, fw);
3957
3958         if (!status)
3959                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3960                                   adapter->fw_on_flash);
3961
3962 fw_exit:
3963         release_firmware(fw);
3964         return status;
3965 }
3966
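/*
 * Summary comment added for clarity: this configures the adapter's embedded
 * switch.  In VEB mode VM-to-VM traffic is switched inside the NIC; in VEPA
 * mode it is hairpinned through the adjacent external switch.
 */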
3967 static int be_ndo_bridge_setlink(struct net_device *dev,
3968                                     struct nlmsghdr *nlh)
3969 {
3970         struct be_adapter *adapter = netdev_priv(dev);
3971         struct nlattr *attr, *br_spec;
3972         int rem;
3973         int status = 0;
3974         u16 mode = 0;
3975
3976         if (!sriov_enabled(adapter))
3977                 return -EOPNOTSUPP;
3978
3979         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
        if (!br_spec)
                return -EINVAL;
3980
3981         nla_for_each_nested(attr, br_spec, rem) {
3982                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3983                         continue;
3984
3985                 mode = nla_get_u16(attr);
3986                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
3987                         return -EINVAL;
3988
3989                 status = be_cmd_set_hsw_config(adapter, 0, 0,
3990                                                adapter->if_handle,
3991                                                mode == BRIDGE_MODE_VEPA ?
3992                                                PORT_FWD_TYPE_VEPA :
3993                                                PORT_FWD_TYPE_VEB);
3994                 if (status)
3995                         goto err;
3996
3997                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
3998                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
3999
4000                 return status;
4001         }
4002 err:
4003         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4004                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4005
4006         return status;
4007 }
4008
4009 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4010                                     struct net_device *dev,
4011                                     u32 filter_mask)
4012 {
4013         struct be_adapter *adapter = netdev_priv(dev);
4014         int status = 0;
4015         u8 hsw_mode;
4016
4017         if (!sriov_enabled(adapter))
4018                 return 0;
4019
4020         /* BE and Lancer chips support VEB mode only */
4021         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4022                 hsw_mode = PORT_FWD_TYPE_VEB;
4023         } else {
4024                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4025                                                adapter->if_handle, &hsw_mode);
4026                 if (status)
4027                         return 0;
4028         }
4029
4030         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4031                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4032                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4033 }
4034
4035 static const struct net_device_ops be_netdev_ops = {
4036         .ndo_open               = be_open,
4037         .ndo_stop               = be_close,
4038         .ndo_start_xmit         = be_xmit,
4039         .ndo_set_rx_mode        = be_set_rx_mode,
4040         .ndo_set_mac_address    = be_mac_addr_set,
4041         .ndo_change_mtu         = be_change_mtu,
4042         .ndo_get_stats64        = be_get_stats64,
4043         .ndo_validate_addr      = eth_validate_addr,
4044         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4045         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4046         .ndo_set_vf_mac         = be_set_vf_mac,
4047         .ndo_set_vf_vlan        = be_set_vf_vlan,
4048         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4049         .ndo_get_vf_config      = be_get_vf_config,
4050 #ifdef CONFIG_NET_POLL_CONTROLLER
4051         .ndo_poll_controller    = be_netpoll,
4052 #endif
4053         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4054         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4055 #ifdef CONFIG_NET_RX_BUSY_POLL
4056         .ndo_busy_poll          = be_busy_poll
4057 #endif
4058 };
4059
4060 static void be_netdev_init(struct net_device *netdev)
4061 {
4062         struct be_adapter *adapter = netdev_priv(netdev);
4063
4064         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4065                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4066                 NETIF_F_HW_VLAN_CTAG_TX;
4067         if (be_multi_rxq(adapter))
4068                 netdev->hw_features |= NETIF_F_RXHASH;
4069
4070         netdev->features |= netdev->hw_features |
4071                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4072
4073         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4074                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4075
4076         netdev->priv_flags |= IFF_UNICAST_FLT;
4077
4078         netdev->flags |= IFF_MULTICAST;
4079
4080         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4081
4082         netdev->netdev_ops = &be_netdev_ops;
4083
4084         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4085 }
4086
4087 static void be_unmap_pci_bars(struct be_adapter *adapter)
4088 {
4089         if (adapter->csr)
4090                 pci_iounmap(adapter->pdev, adapter->csr);
4091         if (adapter->db)
4092                 pci_iounmap(adapter->pdev, adapter->db);
4093 }
4094
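/*
 * Summary comment added for clarity: the doorbell registers live in BAR 0 on
 * Lancer and on virtual functions, and in BAR 4 on other physical functions,
 * as encoded in the checks below.
 */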
4095 static int db_bar(struct be_adapter *adapter)
4096 {
4097         if (lancer_chip(adapter) || !be_physfn(adapter))
4098                 return 0;
4099         else
4100                 return 4;
4101 }
4102
4103 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4104 {
4105         if (skyhawk_chip(adapter)) {
4106                 adapter->roce_db.size = 4096;
4107                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4108                                                               db_bar(adapter));
4109                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4110                                                                db_bar(adapter));
4111         }
4112         return 0;
4113 }
4114
4115 static int be_map_pci_bars(struct be_adapter *adapter)
4116 {
4117         u8 __iomem *addr;
4118
4119         if (BEx_chip(adapter) && be_physfn(adapter)) {
4120                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4121                 if (adapter->csr == NULL)
4122                         return -ENOMEM;
4123         }
4124
4125         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4126         if (addr == NULL)
4127                 goto pci_map_err;
4128         adapter->db = addr;
4129
4130         be_roce_map_pci_bars(adapter);
4131         return 0;
4132
4133 pci_map_err:
4134         be_unmap_pci_bars(adapter);
4135         return -ENOMEM;
4136 }
4137
4138 static void be_ctrl_cleanup(struct be_adapter *adapter)
4139 {
4140         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4141
4142         be_unmap_pci_bars(adapter);
4143
4144         if (mem->va)
4145                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4146                                   mem->dma);
4147
4148         mem = &adapter->rx_filter;
4149         if (mem->va)
4150                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4151                                   mem->dma);
4152 }
4153
4154 static int be_ctrl_init(struct be_adapter *adapter)
4155 {
4156         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4157         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4158         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4159         u32 sli_intf;
4160         int status;
4161
4162         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4163         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4164                                  SLI_INTF_FAMILY_SHIFT;
4165         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4166
4167         status = be_map_pci_bars(adapter);
4168         if (status)
4169                 goto done;
4170
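        /*
         * Allocate 16 bytes beyond the mailbox size so that a 16-byte-aligned
         * view can be carved out with PTR_ALIGN below; both the CPU and DMA
         * addresses of the aligned mailbox then start on a 16-byte boundary.
         * (Summary comment added for clarity.)
         */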
4171         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4172         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4173                                                 mbox_mem_alloc->size,
4174                                                 &mbox_mem_alloc->dma,
4175                                                 GFP_KERNEL);
4176         if (!mbox_mem_alloc->va) {
4177                 status = -ENOMEM;
4178                 goto unmap_pci_bars;
4179         }
4180         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4181         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4182         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4183         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4184
4185         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4186         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4187                                             rx_filter->size, &rx_filter->dma,
4188                                             GFP_KERNEL);
4189         if (rx_filter->va == NULL) {
4190                 status = -ENOMEM;
4191                 goto free_mbox;
4192         }
4193
4194         mutex_init(&adapter->mbox_lock);
4195         spin_lock_init(&adapter->mcc_lock);
4196         spin_lock_init(&adapter->mcc_cq_lock);
4197
4198         init_completion(&adapter->et_cmd_compl);
4199         pci_save_state(adapter->pdev);
4200         return 0;
4201
4202 free_mbox:
4203         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4204                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4205
4206 unmap_pci_bars:
4207         be_unmap_pci_bars(adapter);
4208
4209 done:
4210         return status;
4211 }
4212
static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

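/* Allocate the stats command DMA buffer, sized for the stats-request
 * version used by this chip family.
 */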
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (lancer_chip(adapter))
                cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
        else if (BE2_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        else if (BE3_chip(adapter))
                cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        else
                /* ALL non-BE ASICs */
                cmd->size = sizeof(struct be_cmd_req_get_stats_v2);

        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
        if (!cmd->va)
                return -ENOMEM;
        return 0;
}

static void be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        be_roce_dev_remove(adapter);
        be_intr_set(adapter, false);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        /* tell fw we're done with firing cmds */
        be_cmd_fw_clean(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        pci_disable_pcie_error_reporting(pdev);

        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

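/* Fetch initial configuration from FW: controller attributes, the FW log
 * level (BEx only) and the default number of RSS queues.
 */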
static int be_get_initial_config(struct be_adapter *adapter)
{
        int status, level;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        /* Must be a power of 2 or else MODULO will BUG_ON */
        adapter->be_get_temp_freq = 64;

        if (BEx_chip(adapter)) {
                level = be_cmd_get_fw_log_level(adapter);
                adapter->msg_enable =
                        level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
        }

        adapter->cfg_num_qs = netif_get_num_default_rss_queues();
        return 0;
}

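/* Lancer error recovery: wait for the chip to be ready again, then tear
 * down and re-create the function, reopening the interface if it was up.
 */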
static int lancer_recover_func(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        status = lancer_test_and_set_rdy_state(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev))
                be_close(adapter->netdev);

        be_clear(adapter);

        be_clear_all_error(adapter);

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(adapter->netdev)) {
                status = be_open(adapter->netdev);
                if (status)
                        goto err;
        }

        dev_info(dev, "Adapter recovery successful\n");
        return 0;
err:
        if (status == -EAGAIN)
                dev_err(dev, "Waiting for resource provisioning\n");
        else
                dev_err(dev, "Adapter recovery failed\n");

        return status;
}

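/* Runs every second to check for adapter errors; on Lancer it detaches
 * the netdev and attempts an in-place recovery.
 */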
static void be_func_recovery_task(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
        int status = 0;

        be_detect_error(adapter);

        if (adapter->hw_error && lancer_chip(adapter)) {
                rtnl_lock();
                netif_device_detach(adapter->netdev);
                rtnl_unlock();

                status = lancer_recover_func(adapter);
                if (!status)
                        netif_device_attach(adapter->netdev);
        }

        /* In Lancer, for all errors other than provisioning error (-EAGAIN),
         * no need to attempt further recovery.
         */
        if (!status || status == -EAGAIN)
                schedule_delayed_work(&adapter->func_recovery_work,
                                      msecs_to_jiffies(1000));
}

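/* Periodic (1 sec) housekeeping: reap MCC completions while interrupts
 * are off, refresh HW stats and die temperature, replenish starved RX
 * queues and update EQ delays.
 */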
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        /* When interrupts are not yet enabled, just reap any pending
         * MCC completions.
         */
        if (!netif_running(adapter->netdev)) {
                local_bh_disable();
                be_process_mcc(adapter);
                local_bh_enable();
                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                   &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        if (be_physfn(adapter) &&
            MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        for_all_rx_queues(adapter, rxo, i) {
                /* Replenish RX-queues starved due to memory
                 * allocation failures.
                 */
                if (rxo->rx_post_starved)
                        be_post_rx_frags(rxo, GFP_KERNEL);
        }

        be_eqd_update(adapter);

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

/* If any VFs are already enabled, don't FLR the PF */
static bool be_reset_required(struct be_adapter *adapter)
{
        return pci_num_vf(adapter->pdev) == 0;
}

static char *mc_name(struct be_adapter *adapter)
{
        if (adapter->function_mode & FLEX10_MODE)
                return "FLEX10";
        else if (adapter->function_mode & VNIC_MODE)
                return "vNIC";
        else if (adapter->function_mode & UMC_ENABLED)
                return "UMC";
        else
                return "";
}

static inline char *func_name(struct be_adapter *adapter)
{
        return be_physfn(adapter) ? "PF" : "VF";
}

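/* Standard PCI probe: enable the device, initialize the control path,
 * sync with FW, set up the function and register the netdev.
 */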
static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;
        char port_name;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
        if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);
        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask_and_coherent(&pdev->dev,
                                                   DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        if (be_physfn(adapter)) {
                status = pci_enable_pcie_error_reporting(pdev);
                if (!status)
                        dev_info(&pdev->dev, "PCIe error reporting enabled\n");
        }

        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_fw_wait_ready(adapter);
                if (status)
                        goto ctrl_clean;
        }

        if (be_reset_required(adapter)) {
                status = be_cmd_reset_function(adapter);
                if (status)
                        goto ctrl_clean;

                /* Wait for interrupts to quiesce after an FLR */
                msleep(100);
        }

        /* Allow interrupts for other ULPs running on NIC function */
        be_intr_set(adapter, true);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_initial_config(adapter);
        if (status)
                goto stats_clean;

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto stats_clean;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        be_roce_dev_add(adapter);

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));

        be_cmd_query_port_name(adapter, &port_name);

        dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
                 func_name(adapter), mc_name(adapter), port_name);

        return 0;

unsetup:
        be_clear(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
free_netdev:
        free_netdev(netdev);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

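/* Legacy PM suspend: arm WoL if enabled, quiesce the function and put
 * the device into the requested low-power state.
 */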
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol_en)
                be_setup_wol(adapter, true);

        be_intr_set(adapter, false);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

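/* Legacy PM resume: restore PCI state, wait for FW readiness, rebuild
 * the function and reattach the netdev.
 */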
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        status = be_fw_wait_ready(adapter);
        if (status)
                return status;

        be_intr_set(adapter, true);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol_en)
                be_setup_wol(adapter, false);

        return 0;
}

/* An FLR will stop BE from DMAing any data */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

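/* EEH: a PCI channel error was reported; quiesce the function and tell
 * the EEH core whether a slot reset might recover the device.
 */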
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        if (!adapter->eeh_error) {
                adapter->eeh_error = true;

                cancel_delayed_work_sync(&adapter->func_recovery_work);

                rtnl_lock();
                netif_device_detach(netdev);
                if (netif_running(netdev))
                        be_close(netdev);
                rtnl_unlock();

                be_clear(adapter);
        }

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only for first function as it is needed only once per
         * adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

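/* EEH slot reset: re-enable and restore the device, then report whether
 * FW came back ready.
 */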
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
}

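/* EEH resume: the slot is usable again; reset the function, redo setup
 * and reattach the netdev.
 */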
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);