/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
#include <linux/if_bridge.h>
#include <net/busy_poll.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

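/* Allocate/free the DMA-coherent ring memory backing a be_queue_info.
 * Each ring is a single coherent allocation of len * entry_size bytes;
 * be_queue_alloc() zeroes both the queue state and the ring memory.
 */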
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;

        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                      GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

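/* Enable/disable host interrupts by toggling the HOSTINTR bit of the
 * MEMBAR control register in PCI config space; the register is written
 * back only when the requested state differs from the current one.
 */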
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On Lancer, interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

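/* Doorbell helpers: each notify routine builds a 32-bit doorbell value
 * (ring id plus a posted/popped count and, for EQs/CQs, re-arm bits) and
 * writes it to the adapter's doorbell BAR. The wmb() in the RQ and TXULP
 * variants makes the posted descriptors visible to the device before the
 * doorbell write hands them over.
 */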
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;

        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;

        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;

        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) << DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

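/* ndo_set_mac_address handler: program the new MAC as a new PMAC entry,
 * delete the old entry on success, then query the FW for the currently
 * active MAC to decide whether the change really took effect (a VF
 * without FILTMGMT privilege may be refused).
 */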
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct device *dev = &adapter->pdev->dev;
        struct sockaddr *addr = p;
        int status;
        u8 mac[ETH_ALEN];
        u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* Proceed further only if the user-provided MAC differs from the
         * active MAC
         */
        if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
                return 0;

        /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
         * privilege or if the PF did not provision the new MAC address.
         * On BE3, this cmd will always fail if the VF doesn't have the
         * FILTMGMT privilege. This failure is OK only if the PF programmed
         * the MAC for the VF.
         */
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle, &adapter->pmac_id[0], 0);
        if (!status) {
                curr_pmac_id = adapter->pmac_id[0];

                /* Delete the old programmed MAC. This call may fail if the
                 * old MAC was already deleted by the PF driver.
                 */
                if (adapter->pmac_id[0] != old_pmac_id)
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        old_pmac_id, 0);
        }

        /* Decide if the new MAC is successfully activated only after
         * querying the FW
         */
        status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
                                       adapter->if_handle, true, 0);
        if (status)
                goto err;

        /* The MAC change did not happen, either due to lack of privilege
         * or because the PF didn't pre-provision it.
         */
        if (!ether_addr_equal(addr->sa_data, mac)) {
                status = -EPERM;
                goto err;
        }

        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        dev_info(dev, "MAC address changed to %pM\n", mac);
        return 0;
err:
        dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
        return status;
}

/* The stats cmd version depends on the chip: BE2 supports only v0,
 * BE3 uses v1 and all later chips use v2.
 */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else if (BE3_chip(adapter)) {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* ERX stats live inside the chip-specific hw_stats layout selected above */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else if (BE3_chip(adapter)) {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered = port_stats->rx_address_filtered +
                                    port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v2 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
        if (be_roce_supported(adapter)) {
                drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
                drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
                drvs->rx_roce_frames = port_stats->roce_frames_received;
                drvs->roce_drops_crc = port_stats->roce_drops_crc;
                drvs->roce_drops_payload_len =
                        port_stats->roce_drops_payload_len;
        }
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered = pport_stats->rx_address_filtered +
                                    pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

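/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * counter: the low 16 bits mirror the HW value and the high 16 bits
 * count wraps. E.g. with *acc == 0x0001fff0, a new HW reading of 0x0010
 * implies a wrap, so the new value is hi(*acc) + 0x0010 + 65536 ==
 * 0x00020010.
 */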
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

static void populate_erx_stats(struct be_adapter *adapter,
                               struct be_rx_obj *rxo, u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* This erx HW counter wraps around after 65535; accumulate
                 * it into a 32-bit driver counter.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else if (BE3_chip(adapter))
                        populate_be_v1_stats(adapter);
                else
                        populate_be_v2_stats(adapter);

                /* erx_v2 is a superset of v0/v1, so the v2 layout works for
                 * accessing the v0/v1 fields as well
                 */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

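/* ndo_get_stats64 handler: per-queue packet/byte counters are read under
 * the u64_stats fetch/retry loop so that 64-bit values are read
 * consistently on 32-bit hosts; the error counters are summed from the
 * FW-derived drv_stats.
 */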
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);

                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);

                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per-interface; it is per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                               u32 wrb_cnt, u32 copied, u32 gso_segs,
                               bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
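/* One WRB is consumed by the header, one by the skb linear data (if any)
 * and one per page fragment. On non-Lancer chips the per-packet total
 * must be even, so an odd count is padded with a dummy WRB.
 */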
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If the vlan priority provided by the OS is NOT in the available
         * bmap, use the recommended priority instead
         */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                           adapter->recommended_prio;

        return vlan_tag;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len,
                         bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                              hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                        bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);

                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);

        if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
                if (!vlan_tag)
                        vlan_tag = adapter->pvid;
                /* f/w workaround: setting skip_hw_vlan = 1 tells the f/w
                 * to skip VLAN insertion
                 */
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

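/* Returns true for IPv6 packets whose next header is neither TCP nor UDP
 * and whose first extension header reports hdrlen == 0xff; such packets
 * can trigger the BE3 TX-stall issue worked around in
 * be_xmit_workarounds().
 */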
static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *)(skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
                                struct sk_buff *skb)
{
        return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}

static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
                                           struct sk_buff *skb,
                                           bool *skip_hw_vlan)
{
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
        unsigned int eth_hdr_len;
        struct iphdr *ip;

        /* The Lancer and SH-R ASICs have a bug wherein packets that are
         * 32 bytes or less may cause a transmit stall on that port. The
         * workaround is to pad such packets to a 36-byte length.
         */
        if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
                if (skb_padto(skb, 36))
                        goto tx_drop;
                skb->len = 36;
        }

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         * For padded packets, Lancer computes an incorrect checksum.
         */
        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                                                VLAN_ETH_HLEN : ETH_HLEN;
        if (skb->len <= 60 &&
            (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
            is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If the vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                *skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert the VLAN in the pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
            vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lockup when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts a VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in the driver,
         * and set the event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        return skb;
tx_drop:
        dev_kfree_skb_any(skb);
        return NULL;
}

static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        bool dummy_wrb, stopped = false;
        u32 wrb_cnt = 0, copied = 0;
        bool skip_hw_vlan = false;
        u32 start = txq->head;

        skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
        if (!skb) {
                tx_stats(txo)->tx_drv_drops++;
                return NETDEV_TX_OK;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                tx_stats(txo)->tx_drv_drops++;
                dev_kfree_skb_any(skb);
        }
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
            new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                         "MTU must be between %d and %d bytes\n",
                         BE_MIN_MTU,
                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                 netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > be_max_vlans(adapter))
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
        if (status) {
                /* Set to VLAN promisc mode as setting VLAN filter failed */
                if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
                        goto set_vlan_promisc;
                dev_err(&adapter->pdev->dev,
                        "Setting HW VLAN filtering failed.\n");
        } else {
                if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
                        /* hw VLAN filtering re-enabled. */
                        status = be_cmd_rx_filter(adapter,
                                                  BE_FLAGS_VLAN_PROMISC, OFF);
                        if (!status) {
                                dev_info(&adapter->pdev->dev,
                                         "Disabling VLAN Promiscuous mode.\n");
                                adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
                                dev_info(&adapter->pdev->dev,
                                         "Re-Enabling HW VLAN filtering\n");
                        }
                }
        }

        return status;

set_vlan_promisc:
        dev_warn(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");

        status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
        if (!status) {
                dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering\n");
                adapter->flags |= BE_FLAGS_VLAN_PROMISC;
        } else {
                dev_err(&adapter->pdev->dev,
                        "Failed to enable VLAN Promiscuous mode.\n");
        }
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= be_max_vlans(adapter))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

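/* ndo_set_rx_mode handler: precedence is promiscuous > all-multicast >
 * exact UC/MC filters. The UC MAC filters are rebuilt from scratch
 * whenever the netdev UC list changes, falling back to promiscuous mode
 * if the list outgrows the available PMAC slots.
 */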
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > be_max_mc(adapter)) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev,
                         "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev,
                         "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        } else {
                status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
                                        vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                        mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
        vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
                return -EINVAL;

        if (vlan || qos) {
                vlan |= qos << VLAN_PRIO_SHIFT;
                if (vf_cfg->vlan_tag != vlan) {
                        /* If this is a new value, program it; else skip */
                        vf_cfg->vlan_tag = vlan;
                        status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                                       vf_cfg->if_handle, 0);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                vf_cfg->vlan_tag = 0;
                vlan = vf_cfg->def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                                               vf_cfg->if_handle, 0);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                         "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev, int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                        "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
                          ulong now)
{
        aic->rx_pkts_prev = rx_pkts;
        aic->tx_reqs_prev = tx_pkts;
        aic->jiffies = now;
}

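/* Adaptive EQ delay (interrupt coalescing): compute the aggregate RX+TX
 * packet rate seen by each EQ since the last update and derive a new
 * delay, eqd = (pps / 15000) << 2, clamped to [min_eqd, max_eqd]; rates
 * below 30K pps map to a delay of 0. For example, 600,000 pps gives
 * (600000 / 15000) << 2 = 160. The FW is notified only for EQs whose
 * delay actually changed.
 */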
1355 static void be_eqd_update(struct be_adapter *adapter)
1356 {
1357         struct be_set_eqd set_eqd[MAX_EVT_QS];
1358         int eqd, i, num = 0, start;
1359         struct be_aic_obj *aic;
1360         struct be_eq_obj *eqo;
1361         struct be_rx_obj *rxo;
1362         struct be_tx_obj *txo;
1363         u64 rx_pkts, tx_pkts;
1364         ulong now;
1365         u32 pps, delta;
1366
1367         for_all_evt_queues(adapter, eqo, i) {
1368                 aic = &adapter->aic_obj[eqo->idx];
1369                 if (!aic->enable) {
1370                         if (aic->jiffies)
1371                                 aic->jiffies = 0;
1372                         eqd = aic->et_eqd;
1373                         goto modify_eqd;
1374                 }
1375
1376                 rxo = &adapter->rx_obj[eqo->idx];
1377                 do {
1378                         start = u64_stats_fetch_begin_bh(&rxo->stats.sync);
1379                         rx_pkts = rxo->stats.rx_pkts;
1380                 } while (u64_stats_fetch_retry_bh(&rxo->stats.sync, start));
1381
1382                 txo = &adapter->tx_obj[eqo->idx];
1383                 do {
1384                         start = u64_stats_fetch_begin_bh(&txo->stats.sync);
1385                         tx_pkts = txo->stats.tx_reqs;
1386                 } while (u64_stats_fetch_retry_bh(&txo->stats.sync, start));
1387
1389                 /* Skip if the counters wrapped around or this is the first calculation */
1390                 now = jiffies;
1391                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1392                     rx_pkts < aic->rx_pkts_prev ||
1393                     tx_pkts < aic->tx_reqs_prev) {
1394                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1395                         continue;
1396                 }
1397
1398                 delta = jiffies_to_msecs(now - aic->jiffies);
1399                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1400                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1401                 eqd = (pps / 15000) << 2;
1402
1403                 if (eqd < 8)
1404                         eqd = 0;
1405                 eqd = min_t(u32, eqd, aic->max_eqd);
1406                 eqd = max_t(u32, eqd, aic->min_eqd);
1407
1408                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1409 modify_eqd:
1410                 if (eqd != aic->prev_eqd) {
1411                         set_eqd[num].delay_multiplier = (eqd * 65) / 100;
1412                         set_eqd[num].eq_id = eqo->q.id;
1413                         aic->prev_eqd = eqd;
1414                         num++;
1415                 }
1416         }
1417
1418         if (num)
1419                 be_cmd_modify_eqd(adapter, set_eqd, num);
1420 }
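/*
 * Worked example (illustrative numbers): with aic->jiffies one second old
 * (delta = 1000 ms), 100k new rx completions and 50k new tx requests:
 *
 *      pps = (100000 * 1000 / 1000) + (50000 * 1000 / 1000) = 150000
 *      eqd = (150000 / 15000) << 2 = 40
 *
 * 40 is not below 8, so it is not forced to 0; after clamping to
 * [aic->min_eqd, aic->max_eqd], the FW is asked for a delay multiplier of
 * (40 * 65) / 100 = 26, provided 40 differs from aic->prev_eqd.
 */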
1421
1422 static void be_rx_stats_update(struct be_rx_obj *rxo,
1423                 struct be_rx_compl_info *rxcp)
1424 {
1425         struct be_rx_stats *stats = rx_stats(rxo);
1426
1427         u64_stats_update_begin(&stats->sync);
1428         stats->rx_compl++;
1429         stats->rx_bytes += rxcp->pkt_size;
1430         stats->rx_pkts++;
1431         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1432                 stats->rx_mcast_pkts++;
1433         if (rxcp->err)
1434                 stats->rx_compl_err++;
1435         u64_stats_update_end(&stats->sync);
1436 }
1437
1438 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1439 {
1440         /* L4 checksum is not reliable for non-TCP/UDP packets.
1441          * Also ignore ipcksm for IPv6 packets. */
1442         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1443                                 (rxcp->ip_csum || rxcp->ipv6);
1444 }
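/*
 * Illustration: a TCP/IPv4 frame with good L4 and IP checksums gives
 * (tcpf=1, l4_csum=1, ip_csum=1) => true, so the skb can be marked
 * CHECKSUM_UNNECESSARY. A TCP/IPv6 frame passes on (tcpf=1, l4_csum=1,
 * ipv6=1) alone, since ipcksm carries no meaning without an IPv4 header.
 */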
1445
1446 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1447 {
1448         struct be_adapter *adapter = rxo->adapter;
1449         struct be_rx_page_info *rx_page_info;
1450         struct be_queue_info *rxq = &rxo->q;
1451         u16 frag_idx = rxq->tail;
1452
1453         rx_page_info = &rxo->page_info_tbl[frag_idx];
1454         BUG_ON(!rx_page_info->page);
1455
1456         if (rx_page_info->last_page_user) {
1457                 dma_unmap_page(&adapter->pdev->dev,
1458                                dma_unmap_addr(rx_page_info, bus),
1459                                adapter->big_page_size, DMA_FROM_DEVICE);
1460                 rx_page_info->last_page_user = false;
1461         }
1462
1463         queue_tail_inc(rxq);
1464         atomic_dec(&rxq->used);
1465         return rx_page_info;
1466 }
1467
1468 /* Throw away the data in the Rx completion */
1469 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1470                                 struct be_rx_compl_info *rxcp)
1471 {
1472         struct be_rx_page_info *page_info;
1473         u16 i, num_rcvd = rxcp->num_rcvd;
1474
1475         for (i = 0; i < num_rcvd; i++) {
1476                 page_info = get_rx_page_info(rxo);
1477                 put_page(page_info->page);
1478                 memset(page_info, 0, sizeof(*page_info));
1479         }
1480 }
1481
1482 /*
1483  * skb_fill_rx_data forms a complete skb for an Ethernet frame
1484  * indicated by rxcp.
1485  */
1486 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1487                              struct be_rx_compl_info *rxcp)
1488 {
1489         struct be_rx_page_info *page_info;
1490         u16 i, j;
1491         u16 hdr_len, curr_frag_len, remaining;
1492         u8 *start;
1493
1494         page_info = get_rx_page_info(rxo);
1495         start = page_address(page_info->page) + page_info->page_offset;
1496         prefetch(start);
1497
1498         /* Copy data in the first descriptor of this completion */
1499         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1500
1501         skb->len = curr_frag_len;
1502         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1503                 memcpy(skb->data, start, curr_frag_len);
1504                 /* Complete packet has now been moved to data */
1505                 put_page(page_info->page);
1506                 skb->data_len = 0;
1507                 skb->tail += curr_frag_len;
1508         } else {
1509                 hdr_len = ETH_HLEN;
1510                 memcpy(skb->data, start, hdr_len);
1511                 skb_shinfo(skb)->nr_frags = 1;
1512                 skb_frag_set_page(skb, 0, page_info->page);
1513                 skb_shinfo(skb)->frags[0].page_offset =
1514                                         page_info->page_offset + hdr_len;
1515                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1516                 skb->data_len = curr_frag_len - hdr_len;
1517                 skb->truesize += rx_frag_size;
1518                 skb->tail += hdr_len;
1519         }
1520         page_info->page = NULL;
1521
1522         if (rxcp->pkt_size <= rx_frag_size) {
1523                 BUG_ON(rxcp->num_rcvd != 1);
1524                 return;
1525         }
1526
1527         /* More frags present for this completion */
1528         remaining = rxcp->pkt_size - curr_frag_len;
1529         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1530                 page_info = get_rx_page_info(rxo);
1531                 curr_frag_len = min(remaining, rx_frag_size);
1532
1533                 /* Coalesce all frags from the same physical page in one slot */
1534                 if (page_info->page_offset == 0) {
1535                         /* Fresh page */
1536                         j++;
1537                         skb_frag_set_page(skb, j, page_info->page);
1538                         skb_shinfo(skb)->frags[j].page_offset =
1539                                                         page_info->page_offset;
1540                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1541                         skb_shinfo(skb)->nr_frags++;
1542                 } else {
1543                         put_page(page_info->page);
1544                 }
1545
1546                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1547                 skb->len += curr_frag_len;
1548                 skb->data_len += curr_frag_len;
1549                 skb->truesize += rx_frag_size;
1550                 remaining -= curr_frag_len;
1551                 page_info->page = NULL;
1552         }
1553         BUG_ON(j > MAX_SKB_FRAGS);
1554 }
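/*
 * Illustration (default rx_frag_size of 2048): a 3000-byte frame arrives
 * as two receive fragments. From the first 2048-byte fragment, ETH_HLEN
 * (14) bytes are copied into the skb linear area and the remaining 2034
 * become frag[0]. The second fragment carries the last 952 bytes and is
 * either appended to frag[0] (same physical page) or becomes frag[1],
 * leaving skb->len == 3000.
 */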
1555
1556 /* Process the RX completion indicated by rxcp when GRO is disabled */
1557 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1558                                 struct be_rx_compl_info *rxcp)
1559 {
1560         struct be_adapter *adapter = rxo->adapter;
1561         struct net_device *netdev = adapter->netdev;
1562         struct sk_buff *skb;
1563
1564         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1565         if (unlikely(!skb)) {
1566                 rx_stats(rxo)->rx_drops_no_skbs++;
1567                 be_rx_compl_discard(rxo, rxcp);
1568                 return;
1569         }
1570
1571         skb_fill_rx_data(rxo, skb, rxcp);
1572
1573         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1574                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1575         else
1576                 skb_checksum_none_assert(skb);
1577
1578         skb->protocol = eth_type_trans(skb, netdev);
1579         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1580         if (netdev->features & NETIF_F_RXHASH)
1581                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1582         skb_mark_napi_id(skb, napi);
1583
1584         if (rxcp->vlanf)
1585                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1586
1587         netif_receive_skb(skb);
1588 }
1589
1590 /* Process the RX completion indicated by rxcp when GRO is enabled */
1591 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1592                                     struct napi_struct *napi,
1593                                     struct be_rx_compl_info *rxcp)
1594 {
1595         struct be_adapter *adapter = rxo->adapter;
1596         struct be_rx_page_info *page_info;
1597         struct sk_buff *skb = NULL;
1598         u16 remaining, curr_frag_len;
1599         u16 i, j;
1600
1601         skb = napi_get_frags(napi);
1602         if (!skb) {
1603                 be_rx_compl_discard(rxo, rxcp);
1604                 return;
1605         }
1606
1607         remaining = rxcp->pkt_size;
1608         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1609                 page_info = get_rx_page_info(rxo);
1610
1611                 curr_frag_len = min(remaining, rx_frag_size);
1612
1613                 /* Coalesce all frags from the same physical page in one slot */
1614                 if (i == 0 || page_info->page_offset == 0) {
1615                         /* First frag or Fresh page */
1616                         j++;
1617                         skb_frag_set_page(skb, j, page_info->page);
1618                         skb_shinfo(skb)->frags[j].page_offset =
1619                                                         page_info->page_offset;
1620                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1621                 } else {
1622                         put_page(page_info->page);
1623                 }
1624                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1625                 skb->truesize += rx_frag_size;
1626                 remaining -= curr_frag_len;
1627                 memset(page_info, 0, sizeof(*page_info));
1628         }
1629         BUG_ON(j > MAX_SKB_FRAGS);
1630
1631         skb_shinfo(skb)->nr_frags = j + 1;
1632         skb->len = rxcp->pkt_size;
1633         skb->data_len = rxcp->pkt_size;
1634         skb->ip_summed = CHECKSUM_UNNECESSARY;
1635         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1636         if (adapter->netdev->features & NETIF_F_RXHASH)
1637                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1638         skb_mark_napi_id(skb, napi);
1639
1640         if (rxcp->vlanf)
1641                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1642
1643         napi_gro_frags(napi);
1644 }
1645
1646 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1647                                  struct be_rx_compl_info *rxcp)
1648 {
1649         rxcp->pkt_size =
1650                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1651         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1652         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1653         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1654         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1655         rxcp->ip_csum =
1656                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1657         rxcp->l4_csum =
1658                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1659         rxcp->ipv6 =
1660                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1661         rxcp->num_rcvd =
1662                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1663         rxcp->pkt_type =
1664                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1665         rxcp->rss_hash =
1666                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1667         if (rxcp->vlanf) {
1668                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1669                                           compl);
1670                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1671                                                compl);
1672         }
1673         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1674 }
1675
1676 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1677                                  struct be_rx_compl_info *rxcp)
1678 {
1679         rxcp->pkt_size =
1680                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1681         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1682         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1683         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1684         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1685         rxcp->ip_csum =
1686                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1687         rxcp->l4_csum =
1688                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1689         rxcp->ipv6 =
1690                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1691         rxcp->num_rcvd =
1692                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1693         rxcp->pkt_type =
1694                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1695         rxcp->rss_hash =
1696                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1697         if (rxcp->vlanf) {
1698                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1699                                           compl);
1700                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1701                                                compl);
1702         }
1703         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1704         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1705                                       ip_frag, compl);
1706 }
1707
1708 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1709 {
1710         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1711         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1712         struct be_adapter *adapter = rxo->adapter;
1713
1714         /* For checking the valid bit, it is OK to use either definition as
1715          * the valid bit is at the same position in both v0 and v1 Rx compls */
1716         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1717                 return NULL;
1718
1719         rmb();
1720         be_dws_le_to_cpu(compl, sizeof(*compl));
1721
1722         if (adapter->be3_native)
1723                 be_parse_rx_compl_v1(compl, rxcp);
1724         else
1725                 be_parse_rx_compl_v0(compl, rxcp);
1726
1727         if (rxcp->ip_frag)
1728                 rxcp->l4_csum = 0;
1729
1730         if (rxcp->vlanf) {
1731                 /* vlanf could be wrongly set in some cards.
1732                  * Ignore vlanf if vtm is not set */
1733                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1734                         rxcp->vlanf = 0;
1735
1736                 if (!lancer_chip(adapter))
1737                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1738
1739                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1740                     !adapter->vlan_tag[rxcp->vlan_tag])
1741                         rxcp->vlanf = 0;
1742         }
1743
1744         /* As the compl has been parsed, reset it; we won't touch it again */
1745         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1746
1747         queue_tail_inc(&rxo->cq);
1748         return rxcp;
1749 }
1750
1751 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1752 {
1753         u32 order = get_order(size);
1754
1755         if (order > 0)
1756                 gfp |= __GFP_COMP;
1757         return alloc_pages(gfp, order);
1758 }
1759
1760 /*
1761  * Allocate a page, split it into fragments of size rx_frag_size and post
1762  * them as receive buffers to BE
1763  */
1764 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1765 {
1766         struct be_adapter *adapter = rxo->adapter;
1767         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1768         struct be_queue_info *rxq = &rxo->q;
1769         struct page *pagep = NULL;
1770         struct device *dev = &adapter->pdev->dev;
1771         struct be_eth_rx_d *rxd;
1772         u64 page_dmaaddr = 0, frag_dmaaddr;
1773         u32 posted, page_offset = 0;
1774
1775         page_info = &rxo->page_info_tbl[rxq->head];
1776         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1777                 if (!pagep) {
1778                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1779                         if (unlikely(!pagep)) {
1780                                 rx_stats(rxo)->rx_post_fail++;
1781                                 break;
1782                         }
1783                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1784                                                     adapter->big_page_size,
1785                                                     DMA_FROM_DEVICE);
1786                         if (dma_mapping_error(dev, page_dmaaddr)) {
1787                                 put_page(pagep);
1788                                 pagep = NULL;
1789                                 rx_stats(rxo)->rx_post_fail++;
1790                                 break;
1791                         }
1792                         page_info->page_offset = 0;
1793                 } else {
1794                         get_page(pagep);
1795                         page_info->page_offset = page_offset + rx_frag_size;
1796                 }
1797                 page_offset = page_info->page_offset;
1798                 page_info->page = pagep;
1799                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1800                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1801
1802                 rxd = queue_head_node(rxq);
1803                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1804                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1805
1806                 /* Any space left in the current big page for another frag? */
1807                 if ((page_offset + rx_frag_size + rx_frag_size) >
1808                                         adapter->big_page_size) {
1809                         pagep = NULL;
1810                         page_info->last_page_user = true;
1811                 }
1812
1813                 prev_page_info = page_info;
1814                 queue_head_inc(rxq);
1815                 page_info = &rxo->page_info_tbl[rxq->head];
1816         }
1817         if (pagep)
1818                 prev_page_info->last_page_user = true;
1819
1820         if (posted) {
1821                 atomic_add(posted, &rxq->used);
1822                 if (rxo->rx_post_starved)
1823                         rxo->rx_post_starved = false;
1824                 be_rxq_notify(adapter, rxq->id, posted);
1825         } else if (atomic_read(&rxq->used) == 0) {
1826                 /* Let be_worker replenish when memory is available */
1827                 rxo->rx_post_starved = true;
1828         }
1829 }
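/*
 * Illustration (assuming 4K pages and the default rx_frag_size of 2048):
 * big_page_size = (1 << get_order(2048)) * 4096 = 4096, so each allocated
 * page is split into two 2048-byte frags at offsets 0 and 2048. The frag
 * that exhausts the page is tagged last_page_user, and the page is only
 * DMA-unmapped once that frag is consumed via get_rx_page_info().
 */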
1830
1831 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1832 {
1833         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1834
1835         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1836                 return NULL;
1837
1838         rmb();
1839         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1840
1841         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1842
1843         queue_tail_inc(tx_cq);
1844         return txcp;
1845 }
1846
1847 static u16 be_tx_compl_process(struct be_adapter *adapter,
1848                 struct be_tx_obj *txo, u16 last_index)
1849 {
1850         struct be_queue_info *txq = &txo->q;
1851         struct be_eth_wrb *wrb;
1852         struct sk_buff **sent_skbs = txo->sent_skb_list;
1853         struct sk_buff *sent_skb;
1854         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1855         bool unmap_skb_hdr = true;
1856
1857         sent_skb = sent_skbs[txq->tail];
1858         BUG_ON(!sent_skb);
1859         sent_skbs[txq->tail] = NULL;
1860
1861         /* skip header wrb */
1862         queue_tail_inc(txq);
1863
1864         do {
1865                 cur_index = txq->tail;
1866                 wrb = queue_tail_node(txq);
1867                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1868                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1869                 unmap_skb_hdr = false;
1870
1871                 num_wrbs++;
1872                 queue_tail_inc(txq);
1873         } while (cur_index != last_index);
1874
1875         kfree_skb(sent_skb);
1876         return num_wrbs;
1877 }
1878
1879 /* Return the number of events in the event queue */
1880 static inline int events_get(struct be_eq_obj *eqo)
1881 {
1882         struct be_eq_entry *eqe;
1883         int num = 0;
1884
1885         do {
1886                 eqe = queue_tail_node(&eqo->q);
1887                 if (eqe->evt == 0)
1888                         break;
1889
1890                 rmb();
1891                 eqe->evt = 0;
1892                 num++;
1893                 queue_tail_inc(&eqo->q);
1894         } while (true);
1895
1896         return num;
1897 }
1898
1899 /* Leaves the EQ in a disarmed state */
1900 static void be_eq_clean(struct be_eq_obj *eqo)
1901 {
1902         int num = events_get(eqo);
1903
1904         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1905 }
1906
1907 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1908 {
1909         struct be_rx_page_info *page_info;
1910         struct be_queue_info *rxq = &rxo->q;
1911         struct be_queue_info *rx_cq = &rxo->cq;
1912         struct be_rx_compl_info *rxcp;
1913         struct be_adapter *adapter = rxo->adapter;
1914         int flush_wait = 0;
1915
1916         /* Consume pending rx completions.
1917          * Wait for the flush completion (identified by zero num_rcvd)
1918          * to arrive. Notify CQ even when there are no more CQ entries
1919          * for HW to flush partially coalesced CQ entries.
1920          * In Lancer, there is no need to wait for flush compl.
1921          */
1922         for (;;) {
1923                 rxcp = be_rx_compl_get(rxo);
1924                 if (rxcp == NULL) {
1925                         if (lancer_chip(adapter))
1926                                 break;
1927
1928                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1929                                 dev_warn(&adapter->pdev->dev,
1930                                          "did not receive flush compl\n");
1931                                 break;
1932                         }
1933                         be_cq_notify(adapter, rx_cq->id, true, 0);
1934                         mdelay(1);
1935                 } else {
1936                         be_rx_compl_discard(rxo, rxcp);
1937                         be_cq_notify(adapter, rx_cq->id, false, 1);
1938                         if (rxcp->num_rcvd == 0)
1939                                 break;
1940                 }
1941         }
1942
1943         /* After cleanup, leave the CQ in unarmed state */
1944         be_cq_notify(adapter, rx_cq->id, false, 0);
1945
1946         /* Then free posted rx buffers that were not used */
1947         while (atomic_read(&rxq->used) > 0) {
1948                 page_info = get_rx_page_info(rxo);
1949                 put_page(page_info->page);
1950                 memset(page_info, 0, sizeof(*page_info));
1951         }
1952         BUG_ON(atomic_read(&rxq->used));
1953         rxq->tail = rxq->head = 0;
1954 }
1955
1956 static void be_tx_compl_clean(struct be_adapter *adapter)
1957 {
1958         struct be_tx_obj *txo;
1959         struct be_queue_info *txq;
1960         struct be_eth_tx_compl *txcp;
1961         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1962         struct sk_buff *sent_skb;
1963         bool dummy_wrb;
1964         int i, pending_txqs;
1965
1966         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1967         do {
1968                 pending_txqs = adapter->num_tx_qs;
1969
1970                 for_all_tx_queues(adapter, txo, i) {
1971                         txq = &txo->q;
1972                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1973                                 end_idx =
1974                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1975                                                       wrb_index, txcp);
1976                                 num_wrbs += be_tx_compl_process(adapter, txo,
1977                                                                 end_idx);
1978                                 cmpl++;
1979                         }
1980                         if (cmpl) {
1981                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1982                                 atomic_sub(num_wrbs, &txq->used);
1983                                 cmpl = 0;
1984                                 num_wrbs = 0;
1985                         }
1986                         if (atomic_read(&txq->used) == 0)
1987                                 pending_txqs--;
1988                 }
1989
1990                 if (pending_txqs == 0 || ++timeo > 200)
1991                         break;
1992
1993                 mdelay(1);
1994         } while (true);
1995
1996         for_all_tx_queues(adapter, txo, i) {
1997                 txq = &txo->q;
1998                 if (atomic_read(&txq->used))
1999                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2000                                 atomic_read(&txq->used));
2001
2002                 /* free posted tx for which compls will never arrive */
2003                 while (atomic_read(&txq->used)) {
2004                         sent_skb = txo->sent_skb_list[txq->tail];
2005                         end_idx = txq->tail;
2006                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2007                                                    &dummy_wrb);
2008                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2009                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2010                         atomic_sub(num_wrbs, &txq->used);
2011                 }
2012         }
2013 }
2014
2015 static void be_evt_queues_destroy(struct be_adapter *adapter)
2016 {
2017         struct be_eq_obj *eqo;
2018         int i;
2019
2020         for_all_evt_queues(adapter, eqo, i) {
2021                 if (eqo->q.created) {
2022                         be_eq_clean(eqo);
2023                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2024                         napi_hash_del(&eqo->napi);
2025                         netif_napi_del(&eqo->napi);
2026                 }
2027                 be_queue_free(adapter, &eqo->q);
2028         }
2029 }
2030
2031 static int be_evt_queues_create(struct be_adapter *adapter)
2032 {
2033         struct be_queue_info *eq;
2034         struct be_eq_obj *eqo;
2035         struct be_aic_obj *aic;
2036         int i, rc;
2037
2038         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2039                                     adapter->cfg_num_qs);
2040
2041         for_all_evt_queues(adapter, eqo, i) {
2042                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2043                                BE_NAPI_WEIGHT);
2044                 napi_hash_add(&eqo->napi);
2045                 aic = &adapter->aic_obj[i];
2046                 eqo->adapter = adapter;
2047                 eqo->tx_budget = BE_TX_BUDGET;
2048                 eqo->idx = i;
2049                 aic->max_eqd = BE_MAX_EQD;
2050                 aic->enable = true;
2051
2052                 eq = &eqo->q;
2053                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2054                                         sizeof(struct be_eq_entry));
2055                 if (rc)
2056                         return rc;
2057
2058                 rc = be_cmd_eq_create(adapter, eqo);
2059                 if (rc)
2060                         return rc;
2061         }
2062         return 0;
2063 }
2064
2065 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2066 {
2067         struct be_queue_info *q;
2068
2069         q = &adapter->mcc_obj.q;
2070         if (q->created)
2071                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2072         be_queue_free(adapter, q);
2073
2074         q = &adapter->mcc_obj.cq;
2075         if (q->created)
2076                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2077         be_queue_free(adapter, q);
2078 }
2079
2080 /* Must be called only after TX qs are created as MCC shares TX EQ */
2081 static int be_mcc_queues_create(struct be_adapter *adapter)
2082 {
2083         struct be_queue_info *q, *cq;
2084
2085         cq = &adapter->mcc_obj.cq;
2086         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2087                         sizeof(struct be_mcc_compl)))
2088                 goto err;
2089
2090         /* Use the default EQ for MCC completions */
2091         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2092                 goto mcc_cq_free;
2093
2094         q = &adapter->mcc_obj.q;
2095         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2096                 goto mcc_cq_destroy;
2097
2098         if (be_cmd_mccq_create(adapter, q, cq))
2099                 goto mcc_q_free;
2100
2101         return 0;
2102
2103 mcc_q_free:
2104         be_queue_free(adapter, q);
2105 mcc_cq_destroy:
2106         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2107 mcc_cq_free:
2108         be_queue_free(adapter, cq);
2109 err:
2110         return -1;
2111 }
2112
2113 static void be_tx_queues_destroy(struct be_adapter *adapter)
2114 {
2115         struct be_queue_info *q;
2116         struct be_tx_obj *txo;
2117         u8 i;
2118
2119         for_all_tx_queues(adapter, txo, i) {
2120                 q = &txo->q;
2121                 if (q->created)
2122                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2123                 be_queue_free(adapter, q);
2124
2125                 q = &txo->cq;
2126                 if (q->created)
2127                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2128                 be_queue_free(adapter, q);
2129         }
2130 }
2131
2132 static int be_tx_qs_create(struct be_adapter *adapter)
2133 {
2134         struct be_queue_info *cq, *eq;
2135         struct be_tx_obj *txo;
2136         int status, i;
2137
2138         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2139
2140         for_all_tx_queues(adapter, txo, i) {
2141                 cq = &txo->cq;
2142                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2143                                         sizeof(struct be_eth_tx_compl));
2144                 if (status)
2145                         return status;
2146
2147                 u64_stats_init(&txo->stats.sync);
2148                 u64_stats_init(&txo->stats.sync_compl);
2149
2150                 /* If num_evt_qs is less than num_tx_qs, then more than
2151                  * one TXQ shares an EQ
2152                  */
2153                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2154                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2155                 if (status)
2156                         return status;
2157
2158                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2159                                         sizeof(struct be_eth_wrb));
2160                 if (status)
2161                         return status;
2162
2163                 status = be_cmd_txq_create(adapter, txo);
2164                 if (status)
2165                         return status;
2166         }
2167
2168         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2169                  adapter->num_tx_qs);
2170         return 0;
2171 }
2172
2173 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2174 {
2175         struct be_queue_info *q;
2176         struct be_rx_obj *rxo;
2177         int i;
2178
2179         for_all_rx_queues(adapter, rxo, i) {
2180                 q = &rxo->cq;
2181                 if (q->created)
2182                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2183                 be_queue_free(adapter, q);
2184         }
2185 }
2186
2187 static int be_rx_cqs_create(struct be_adapter *adapter)
2188 {
2189         struct be_queue_info *eq, *cq;
2190         struct be_rx_obj *rxo;
2191         int rc, i;
2192
2193         /* We can create as many RSS rings as there are EQs. */
2194         adapter->num_rx_qs = adapter->num_evt_qs;
2195
2196         /* We'll use RSS only if at least 2 RSS rings are supported.
2197          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2198          */
2199         if (adapter->num_rx_qs > 1)
2200                 adapter->num_rx_qs++;
2201
2202         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2203         for_all_rx_queues(adapter, rxo, i) {
2204                 rxo->adapter = adapter;
2205                 cq = &rxo->cq;
2206                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2207                                 sizeof(struct be_eth_rx_compl));
2208                 if (rc)
2209                         return rc;
2210
2211                 u64_stats_init(&rxo->stats.sync);
2212                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2213                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2214                 if (rc)
2215                         return rc;
2216         }
2217
2218         dev_info(&adapter->pdev->dev,
2219                  "created %d RSS queue(s) and 1 default RX queue\n",
2220                  adapter->num_rx_qs - 1);
2221         return 0;
2222 }
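/*
 * Illustration: with 4 EQs, num_rx_qs starts at 4 and, being > 1, is
 * bumped to 5 -- four RSS rings plus one default RXQ for non-IP traffic.
 * Each RX CQ is then bound round-robin to an EQ via i % num_evt_qs.
 */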
2223
2224 static irqreturn_t be_intx(int irq, void *dev)
2225 {
2226         struct be_eq_obj *eqo = dev;
2227         struct be_adapter *adapter = eqo->adapter;
2228         int num_evts = 0;
2229
2230         /* IRQ is not expected when NAPI is scheduled as the EQ
2231          * will not be armed.
2232          * But, this can happen on Lancer INTx where it takes
2233          * But this can happen on Lancer INTx where it takes
2234          * a while to de-assert INTx or in BE2 where occasionally
2235          * If NAPI is already scheduled, then counting & notifying
2236          * events will orphan them.
2237          */
2238         if (napi_schedule_prep(&eqo->napi)) {
2239                 num_evts = events_get(eqo);
2240                 __napi_schedule(&eqo->napi);
2241                 if (num_evts)
2242                         eqo->spurious_intr = 0;
2243         }
2244         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2245
2246         /* Return IRQ_HANDLED only for the first spurious intr
2247          * after a valid intr to stop the kernel from branding
2248          * this irq as a bad one!
2249          */
2250         if (num_evts || eqo->spurious_intr++ == 0)
2251                 return IRQ_HANDLED;
2252         else
2253                 return IRQ_NONE;
2254 }
2255
2256 static irqreturn_t be_msix(int irq, void *dev)
2257 {
2258         struct be_eq_obj *eqo = dev;
2259
2260         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2261         napi_schedule(&eqo->napi);
2262         return IRQ_HANDLED;
2263 }
2264
2265 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2266 {
2267         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2268 }
2269
2270 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2271                         int budget, int polling)
2272 {
2273         struct be_adapter *adapter = rxo->adapter;
2274         struct be_queue_info *rx_cq = &rxo->cq;
2275         struct be_rx_compl_info *rxcp;
2276         u32 work_done;
2277
2278         for (work_done = 0; work_done < budget; work_done++) {
2279                 rxcp = be_rx_compl_get(rxo);
2280                 if (!rxcp)
2281                         break;
2282
2283                 /* Is it a flush compl that has no data? */
2284                 if (unlikely(rxcp->num_rcvd == 0))
2285                         goto loop_continue;
2286
2287                 /* Discard compls with partial DMA (seen on Lancer B0) */
2288                 if (unlikely(!rxcp->pkt_size)) {
2289                         be_rx_compl_discard(rxo, rxcp);
2290                         goto loop_continue;
2291                 }
2292
2293                 /* On BE drop pkts that arrive due to imperfect filtering in
2294                  * promiscuous mode on some SKUs
2295                  */
2296                 if (unlikely(rxcp->port != adapter->port_num &&
2297                                 !lancer_chip(adapter))) {
2298                         be_rx_compl_discard(rxo, rxcp);
2299                         goto loop_continue;
2300                 }
2301
2302                 /* Don't do GRO when we're busy-polling */
2303                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2304                         be_rx_compl_process_gro(rxo, napi, rxcp);
2305                 else
2306                         be_rx_compl_process(rxo, napi, rxcp);
2307
2308 loop_continue:
2309                 be_rx_stats_update(rxo, rxcp);
2310         }
2311
2312         if (work_done) {
2313                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2314
2315                 /* When an rx-obj gets into post_starved state, just
2316                  * let be_worker do the posting.
2317                  */
2318                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2319                     !rxo->rx_post_starved)
2320                         be_post_rx_frags(rxo, GFP_ATOMIC);
2321         }
2322
2323         return work_done;
2324 }
2325
2326 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2327                           int budget, int idx)
2328 {
2329         struct be_eth_tx_compl *txcp;
2330         int num_wrbs = 0, work_done;
2331
2332         for (work_done = 0; work_done < budget; work_done++) {
2333                 txcp = be_tx_compl_get(&txo->cq);
2334                 if (!txcp)
2335                         break;
2336                 num_wrbs += be_tx_compl_process(adapter, txo,
2337                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2338                                         wrb_index, txcp));
2339         }
2340
2341         if (work_done) {
2342                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2343                 atomic_sub(num_wrbs, &txo->q.used);
2344
2345                 /* As Tx wrbs have been freed up, wake up netdev queue
2346                  * if it was stopped due to lack of tx wrbs. */
2347                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2348                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2349                         netif_wake_subqueue(adapter->netdev, idx);
2350                 }
2351
2352                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2353                 tx_stats(txo)->tx_compl += work_done;
2354                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2355         }
2356         return (work_done < budget); /* Done */
2357 }
2358
2359 int be_poll(struct napi_struct *napi, int budget)
2360 {
2361         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2362         struct be_adapter *adapter = eqo->adapter;
2363         int max_work = 0, work, i, num_evts;
2364         struct be_rx_obj *rxo;
2365         bool tx_done;
2366
2367         num_evts = events_get(eqo);
2368
2369         /* Process all TXQs serviced by this EQ */
2370         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2371                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2372                                         eqo->tx_budget, i);
2373                 if (!tx_done)
2374                         max_work = budget;
2375         }
2376
2377         if (be_lock_napi(eqo)) {
2378                 /* This loop will iterate twice for EQ0 in which
2379                  * completions of the last RXQ (default one) are also processed
2380                  * For other EQs the loop iterates only once
2381                  */
2382                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2383                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2384                         max_work = max(work, max_work);
2385                 }
2386                 be_unlock_napi(eqo);
2387         } else {
2388                 max_work = budget;
2389         }
2390
2391         if (is_mcc_eqo(eqo))
2392                 be_process_mcc(adapter);
2393
2394         if (max_work < budget) {
2395                 napi_complete(napi);
2396                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2397         } else {
2398                 /* As we'll continue in polling mode, count and clear events */
2399                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2400         }
2401         return max_work;
2402 }
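/*
 * Illustration: the TXQ loop above strides by num_evt_qs, so with 8 TX
 * queues and 4 EQs, the EQ at idx 1 services TXQs 1 and 5. If any TXQ
 * fails to drain within tx_budget, max_work is forced to the full budget
 * so NAPI stays scheduled and polling continues.
 */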
2403
2404 #ifdef CONFIG_NET_RX_BUSY_POLL
2405 static int be_busy_poll(struct napi_struct *napi)
2406 {
2407         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2408         struct be_adapter *adapter = eqo->adapter;
2409         struct be_rx_obj *rxo;
2410         int i, work = 0;
2411
2412         if (!be_lock_busy_poll(eqo))
2413                 return LL_FLUSH_BUSY;
2414
2415         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2416                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2417                 if (work)
2418                         break;
2419         }
2420
2421         be_unlock_busy_poll(eqo);
2422         return work;
2423 }
2424 #endif
2425
2426 void be_detect_error(struct be_adapter *adapter)
2427 {
2428         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2429         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2430         u32 i;
2431
2432         if (be_hw_error(adapter))
2433                 return;
2434
2435         if (lancer_chip(adapter)) {
2436                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2437                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2438                         sliport_err1 = ioread32(adapter->db +
2439                                         SLIPORT_ERROR1_OFFSET);
2440                         sliport_err2 = ioread32(adapter->db +
2441                                         SLIPORT_ERROR2_OFFSET);
2442                 }
2443         } else {
2444                 pci_read_config_dword(adapter->pdev,
2445                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2446                 pci_read_config_dword(adapter->pdev,
2447                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2448                 pci_read_config_dword(adapter->pdev,
2449                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2450                 pci_read_config_dword(adapter->pdev,
2451                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2452
2453                 ue_lo = (ue_lo & ~ue_lo_mask);
2454                 ue_hi = (ue_hi & ~ue_hi_mask);
2455         }
2456
2457         /* On certain platforms BE hardware can indicate spurious UEs.
2458          * A real UE would make the h/w stop working completely anyway,
2459          * so hw_error is deliberately not set for UE detection.
2460          */
2461         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2462                 adapter->hw_error = true;
2463                 /* Do not log error messages if it's a FW reset */
2464                 if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2465                     sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2466                         dev_info(&adapter->pdev->dev,
2467                                  "Firmware update in progress\n");
2468                         return;
2469                 } else {
2470                         dev_err(&adapter->pdev->dev,
2471                                 "Error detected in the card\n");
2472                 }
2473         }
2474
2475         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2476                 dev_err(&adapter->pdev->dev,
2477                         "ERR: sliport status 0x%x\n", sliport_status);
2478                 dev_err(&adapter->pdev->dev,
2479                         "ERR: sliport error1 0x%x\n", sliport_err1);
2480                 dev_err(&adapter->pdev->dev,
2481                         "ERR: sliport error2 0x%x\n", sliport_err2);
2482         }
2483
2484         if (ue_lo) {
2485                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2486                         if (ue_lo & 1)
2487                                 dev_err(&adapter->pdev->dev,
2488                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2489                 }
2490         }
2491
2492         if (ue_hi) {
2493                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2494                         if (ue_hi & 1)
2495                                 dev_err(&adapter->pdev->dev,
2496                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2497                 }
2498         }
2500 }
2501
2502 static void be_msix_disable(struct be_adapter *adapter)
2503 {
2504         if (msix_enabled(adapter)) {
2505                 pci_disable_msix(adapter->pdev);
2506                 adapter->num_msix_vec = 0;
2507                 adapter->num_msix_roce_vec = 0;
2508         }
2509 }
2510
2511 static int be_msix_enable(struct be_adapter *adapter)
2512 {
2513         int i, status, num_vec;
2514         struct device *dev = &adapter->pdev->dev;
2515
2516         /* If RoCE is supported, program the max number of NIC vectors that
2517          * may be configured via set-channels, along with vectors needed for
2518          * RoCE. Else, just program the number we'll use initially.
2519          */
2520         if (be_roce_supported(adapter))
2521                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2522                                 2 * num_online_cpus());
2523         else
2524                 num_vec = adapter->cfg_num_qs;
2525
2526         for (i = 0; i < num_vec; i++)
2527                 adapter->msix_entries[i].entry = i;
2528
2529         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2530         if (status == 0) {
2531                 goto done;
2532         } else if (status >= MIN_MSIX_VECTORS) {
2533                 num_vec = status;
2534                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2535                                          num_vec);
2536                 if (!status)
2537                         goto done;
2538         }
2539
2540         dev_warn(dev, "MSIx enable failed\n");
2541
2542         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2543         if (!be_physfn(adapter))
2544                 return status;
2545         return 0;
2546 done:
2547         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2548                 adapter->num_msix_roce_vec = num_vec / 2;
2549                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2550                          adapter->num_msix_roce_vec);
2551         }
2552
2553         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2554
2555         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2556                  adapter->num_msix_vec);
2557         return 0;
2558 }
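/*
 * Illustration: on a 16-CPU host with RoCE supported and be_max_eqs() of
 * 8, num_vec = min(2 * 8, 2 * 16) = 16. If all 16 vectors are granted,
 * half (8) are reserved for RoCE and num_msix_vec becomes 8 for the NIC.
 * If pci_enable_msix() instead reports that only N vectors are available,
 * the driver retries with N, provided N >= MIN_MSIX_VECTORS.
 */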
2559
2560 static inline int be_msix_vec_get(struct be_adapter *adapter,
2561                                 struct be_eq_obj *eqo)
2562 {
2563         return adapter->msix_entries[eqo->msix_idx].vector;
2564 }
2565
2566 static int be_msix_register(struct be_adapter *adapter)
2567 {
2568         struct net_device *netdev = adapter->netdev;
2569         struct be_eq_obj *eqo;
2570         int status, i, vec;
2571
2572         for_all_evt_queues(adapter, eqo, i) {
2573                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2574                 vec = be_msix_vec_get(adapter, eqo);
2575                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2576                 if (status)
2577                         goto err_msix;
2578         }
2579
2580         return 0;
2581 err_msix:
2582         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2583                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2584         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2585                 status);
2586         be_msix_disable(adapter);
2587         return status;
2588 }
2589
2590 static int be_irq_register(struct be_adapter *adapter)
2591 {
2592         struct net_device *netdev = adapter->netdev;
2593         int status;
2594
2595         if (msix_enabled(adapter)) {
2596                 status = be_msix_register(adapter);
2597                 if (status == 0)
2598                         goto done;
2599                 /* INTx is not supported for VF */
2600                 if (!be_physfn(adapter))
2601                         return status;
2602         }
2603
2604         /* INTx: only the first EQ is used */
2605         netdev->irq = adapter->pdev->irq;
2606         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2607                              &adapter->eq_obj[0]);
2608         if (status) {
2609                 dev_err(&adapter->pdev->dev,
2610                         "INTx request IRQ failed - err %d\n", status);
2611                 return status;
2612         }
2613 done:
2614         adapter->isr_registered = true;
2615         return 0;
2616 }
2617
2618 static void be_irq_unregister(struct be_adapter *adapter)
2619 {
2620         struct net_device *netdev = adapter->netdev;
2621         struct be_eq_obj *eqo;
2622         int i;
2623
2624         if (!adapter->isr_registered)
2625                 return;
2626
2627         /* INTx */
2628         if (!msix_enabled(adapter)) {
2629                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2630                 goto done;
2631         }
2632
2633         /* MSIx */
2634         for_all_evt_queues(adapter, eqo, i)
2635                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2636
2637 done:
2638         adapter->isr_registered = false;
2639 }
2640
2641 static void be_rx_qs_destroy(struct be_adapter *adapter)
2642 {
2643         struct be_queue_info *q;
2644         struct be_rx_obj *rxo;
2645         int i;
2646
2647         for_all_rx_queues(adapter, rxo, i) {
2648                 q = &rxo->q;
2649                 if (q->created) {
2650                         be_cmd_rxq_destroy(adapter, q);
2651                         be_rx_cq_clean(rxo);
2652                 }
2653                 be_queue_free(adapter, q);
2654         }
2655 }
2656
2657 static int be_close(struct net_device *netdev)
2658 {
2659         struct be_adapter *adapter = netdev_priv(netdev);
2660         struct be_eq_obj *eqo;
2661         int i;
2662
2663         be_roce_dev_close(adapter);
2664
2665         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2666                 for_all_evt_queues(adapter, eqo, i) {
2667                         napi_disable(&eqo->napi);
2668                         be_disable_busy_poll(eqo);
2669                 }
2670                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2671         }
2672
2673         be_async_mcc_disable(adapter);
2674
2675         /* Wait for all pending tx completions to arrive so that
2676          * all tx skbs are freed.
2677          */
2678         netif_tx_disable(netdev);
2679         be_tx_compl_clean(adapter);
2680
2681         be_rx_qs_destroy(adapter);
2682
2683         for (i = 1; i < (adapter->uc_macs + 1); i++)
2684                 be_cmd_pmac_del(adapter, adapter->if_handle,
2685                                 adapter->pmac_id[i], 0);
2686         adapter->uc_macs = 0;
2687
2688         for_all_evt_queues(adapter, eqo, i) {
2689                 if (msix_enabled(adapter))
2690                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2691                 else
2692                         synchronize_irq(netdev->irq);
2693                 be_eq_clean(eqo);
2694         }
2695
2696         be_irq_unregister(adapter);
2697
2698         return 0;
2699 }
2700
2701 static int be_rx_qs_create(struct be_adapter *adapter)
2702 {
2703         struct be_rx_obj *rxo;
2704         int rc, i, j;
2705         u8 rsstable[128];
2706
2707         for_all_rx_queues(adapter, rxo, i) {
2708                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2709                                     sizeof(struct be_eth_rx_d));
2710                 if (rc)
2711                         return rc;
2712         }
2713
2714         /* The FW would like the default RXQ to be created first */
2715         rxo = default_rxo(adapter);
2716         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2717                                adapter->if_handle, false, &rxo->rss_id);
2718         if (rc)
2719                 return rc;
2720
2721         for_all_rss_queues(adapter, rxo, i) {
2722                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2723                                        rx_frag_size, adapter->if_handle,
2724                                        true, &rxo->rss_id);
2725                 if (rc)
2726                         return rc;
2727         }
2728
2729         if (be_multi_rxq(adapter)) {
2730                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2731                         for_all_rss_queues(adapter, rxo, i) {
2732                                 if ((j + i) >= 128)
2733                                         break;
2734                                 rsstable[j + i] = rxo->rss_id;
2735                         }
2736                 }
2737                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2738                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2739
2740                 if (!BEx_chip(adapter))
2741                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2742                                                 RSS_ENABLE_UDP_IPV6;
2743         } else {
2744                 /* Disable RSS, if only the default RXQ is created */
2745                 adapter->rss_flags = RSS_ENABLE_NONE;
2746         }
2747
2748         rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2749                                128);
2750         if (rc) {
2751                 adapter->rss_flags = RSS_ENABLE_NONE;
2752                 return rc;
2753         }
2754
2755         /* First time posting */
2756         for_all_rx_queues(adapter, rxo, i)
2757                 be_post_rx_frags(rxo, GFP_KERNEL);
2758         return 0;
2759 }
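/*
 * Illustration: with 4 RSS rings the 128-entry indirection table is filled
 * as [id0, id1, id2, id3, id0, id1, ...], i.e. each ring's rss_id repeats
 * every num_rx_qs - 1 slots; a flow's RSS hash indexes this table to pick
 * its ring.
 */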
2760
2761 static int be_open(struct net_device *netdev)
2762 {
2763         struct be_adapter *adapter = netdev_priv(netdev);
2764         struct be_eq_obj *eqo;
2765         struct be_rx_obj *rxo;
2766         struct be_tx_obj *txo;
2767         u8 link_status;
2768         int status, i;
2769
2770         status = be_rx_qs_create(adapter);
2771         if (status)
2772                 goto err;
2773
2774         status = be_irq_register(adapter);
2775         if (status)
2776                 goto err;
2777
2778         for_all_rx_queues(adapter, rxo, i)
2779                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2780
2781         for_all_tx_queues(adapter, txo, i)
2782                 be_cq_notify(adapter, txo->cq.id, true, 0);
2783
2784         be_async_mcc_enable(adapter);
2785
2786         for_all_evt_queues(adapter, eqo, i) {
2787                 napi_enable(&eqo->napi);
2788                 be_enable_busy_poll(eqo);
2789                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2790         }
2791         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2792
2793         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2794         if (!status)
2795                 be_link_status_update(adapter, link_status);
2796
2797         netif_tx_start_all_queues(netdev);
2798         be_roce_dev_open(adapter);
2799         return 0;
2800 err:
2801         be_close(adapter->netdev);
2802         return -EIO;
2803 }
2804
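     /* Program (or clear) the magic-packet Wake-on-LAN filter.  Enabling
      * programs the current dev_addr and arms PCI wake from D3hot/D3cold;
      * disabling programs a zeroed MAC and disarms PCI wake.
      */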
2805 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2806 {
2807         struct be_dma_mem cmd;
2808         int status = 0;
2809         u8 mac[ETH_ALEN];
2810
2811         memset(mac, 0, ETH_ALEN);
2812
2813         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2814         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2815                                      GFP_KERNEL);
2816         if (cmd.va == NULL)
2817                 return -ENOMEM;
2818
2819         if (enable) {
2820                 status = pci_write_config_dword(adapter->pdev,
2821                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2822                 if (status) {
2823                         dev_err(&adapter->pdev->dev,
2824                                 "Could not enable Wake-on-LAN\n");
2825                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2826                                           cmd.dma);
2827                         return status;
2828                 }
2829                 status = be_cmd_enable_magic_wol(adapter,
2830                                 adapter->netdev->dev_addr, &cmd);
2831                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2832                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2833         } else {
2834                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2835                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2836                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2837         }
2838
2839         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2840         return status;
2841 }
2842
2843 /*
2844  * Generate a seed MAC address from the PF MAC address using jhash.
2845  * MAC addresses for the VFs are assigned incrementally, starting from the
2846  * seed. These addresses are programmed into the ASIC by the PF, and each
2847  * VF driver queries for its MAC address during probe.
2848  */
2849 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2850 {
2851         u32 vf;
2852         int status = 0;
2853         u8 mac[ETH_ALEN];
2854         struct be_vf_cfg *vf_cfg;
2855
2856         be_vf_eth_addr_generate(adapter, mac);
2857
2858         for_all_vfs(adapter, vf_cfg, vf) {
2859                 if (BEx_chip(adapter))
2860                         status = be_cmd_pmac_add(adapter, mac,
2861                                                  vf_cfg->if_handle,
2862                                                  &vf_cfg->pmac_id, vf + 1);
2863                 else
2864                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
2865                                                 vf + 1);
2866
2867                 if (status)
2868                         dev_err(&adapter->pdev->dev,
2869                                 "MAC address assignment failed for VF %d\n", vf);
2870                 else
2871                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2872
2873                 mac[5] += 1;
2874         }
2875         return status;
2876 }
2877
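     /* Used when VFs were already enabled before this driver loaded: read
      * back the MAC address the FW has active for each VF instead of
      * assigning new ones.
      */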
2878 static int be_vfs_mac_query(struct be_adapter *adapter)
2879 {
2880         int status, vf;
2881         u8 mac[ETH_ALEN];
2882         struct be_vf_cfg *vf_cfg;
2883
2884         for_all_vfs(adapter, vf_cfg, vf) {
2885                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
2886                                                mac, vf_cfg->if_handle,
2887                                                false, vf+1);
2888                 if (status)
2889                         return status;
2890                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2891         }
2892         return 0;
2893 }
2894
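     /* Undo VF setup.  If the VFs are still assigned to VMs, SR-IOV is left
      * enabled and only the driver-side VF state is freed; otherwise SR-IOV
      * is disabled and each VF's MAC filter and interface are destroyed.
      */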
2895 static void be_vf_clear(struct be_adapter *adapter)
2896 {
2897         struct be_vf_cfg *vf_cfg;
2898         u32 vf;
2899
2900         if (pci_vfs_assigned(adapter->pdev)) {
2901                 dev_warn(&adapter->pdev->dev,
2902                          "VFs are assigned to VMs: not disabling VFs\n");
2903                 goto done;
2904         }
2905
2906         pci_disable_sriov(adapter->pdev);
2907
2908         for_all_vfs(adapter, vf_cfg, vf) {
2909                 if (BEx_chip(adapter))
2910                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2911                                         vf_cfg->pmac_id, vf + 1);
2912                 else
2913                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
2914                                        vf + 1);
2915
2916                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2917         }
2918 done:
2919         kfree(adapter->vf_cfg);
2920         adapter->num_vfs = 0;
2921 }
2922
2923 static void be_clear_queues(struct be_adapter *adapter)
2924 {
2925         be_mcc_queues_destroy(adapter);
2926         be_rx_cqs_destroy(adapter);
2927         be_tx_queues_destroy(adapter);
2928         be_evt_queues_destroy(adapter);
2929 }
2930
2931 static void be_cancel_worker(struct be_adapter *adapter)
2932 {
2933         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2934                 cancel_delayed_work_sync(&adapter->work);
2935                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2936         }
2937 }
2938
2939 static void be_mac_clear(struct be_adapter *adapter)
2940 {
2941         int i;
2942
2943         if (adapter->pmac_id) {
2944                 for (i = 0; i < (adapter->uc_macs + 1); i++)
2945                         be_cmd_pmac_del(adapter, adapter->if_handle,
2946                                         adapter->pmac_id[i], 0);
2947                 adapter->uc_macs = 0;
2948
2949                 kfree(adapter->pmac_id);
2950                 adapter->pmac_id = NULL;
2951         }
2952 }
2953
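     /* Tear down in roughly the reverse order of be_setup(): cancel the
      * worker first so it cannot race with the teardown, then clear VFs,
      * MAC filters, the interface, all queues and finally MSI-X.
      */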
2954 static int be_clear(struct be_adapter *adapter)
2955 {
2956         be_cancel_worker(adapter);
2957
2958         if (sriov_enabled(adapter))
2959                 be_vf_clear(adapter);
2960
2961         /* delete the primary MAC along with the uc-mac list */
2962         be_mac_clear(adapter);
2963
2964         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2965
2966         be_clear_queues(adapter);
2967
2968         be_msix_disable(adapter);
2969         return 0;
2970 }
2971
2972 static int be_vfs_if_create(struct be_adapter *adapter)
2973 {
2974         struct be_resources res = {0};
2975         struct be_vf_cfg *vf_cfg;
2976         u32 cap_flags, en_flags, vf;
2977         int status = 0;
2978
2979         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2980                     BE_IF_FLAGS_MULTICAST;
2981
2982         for_all_vfs(adapter, vf_cfg, vf) {
2983                 if (!BE3_chip(adapter)) {
2984                         status = be_cmd_get_profile_config(adapter, &res,
2985                                                            vf + 1);
2986                         if (!status)
2987                                 cap_flags = res.if_cap_flags;
2988                 }
2989
2990                 /* If a FW profile exists, then cap_flags are updated */
2991                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2992                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2993                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2994                                           &vf_cfg->if_handle, vf + 1);
2995                 if (status)
2996                         goto err;
2997         }
2998 err:
2999         return status;
3000 }
3001
3002 static int be_vf_setup_init(struct be_adapter *adapter)
3003 {
3004         struct be_vf_cfg *vf_cfg;
3005         int vf;
3006
3007         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3008                                   GFP_KERNEL);
3009         if (!adapter->vf_cfg)
3010                 return -ENOMEM;
3011
3012         for_all_vfs(adapter, vf_cfg, vf) {
3013                 vf_cfg->if_handle = -1;
3014                 vf_cfg->pmac_id = -1;
3015         }
3016         return 0;
3017 }
3018
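     /* Enable and configure SR-IOV VFs.  Two flows are handled: when VFs
      * are already enabled (old_vfs) the existing if_handles and MACs are
      * queried; otherwise per-VF interfaces are created, MAC addresses are
      * assigned and pci_enable_sriov() is called last.  Each VF is also
      * granted FILTMGMT privilege when it lacks it, and its link speed and
      * default vlan are cached.
      */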
3019 static int be_vf_setup(struct be_adapter *adapter)
3020 {
3021         struct be_vf_cfg *vf_cfg;
3022         u16 def_vlan, lnk_speed;
3023         int status, old_vfs, vf;
3024         struct device *dev = &adapter->pdev->dev;
3025         u32 privileges;
3026
3027         old_vfs = pci_num_vf(adapter->pdev);
3028         if (old_vfs) {
3029                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3030                 if (old_vfs != num_vfs)
3031                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3032                 adapter->num_vfs = old_vfs;
3033         } else {
3034                 if (num_vfs > be_max_vfs(adapter))
3035                         dev_info(dev, "Device supports %d VFs, not %d\n",
3036                                  be_max_vfs(adapter), num_vfs);
3037                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3038                 if (!adapter->num_vfs)
3039                         return 0;
3040         }
3041
3042         status = be_vf_setup_init(adapter);
3043         if (status)
3044                 goto err;
3045
3046         if (old_vfs) {
3047                 for_all_vfs(adapter, vf_cfg, vf) {
3048                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3049                         if (status)
3050                                 goto err;
3051                 }
3052         } else {
3053                 status = be_vfs_if_create(adapter);
3054                 if (status)
3055                         goto err;
3056         }
3057
3058         if (old_vfs) {
3059                 status = be_vfs_mac_query(adapter);
3060                 if (status)
3061                         goto err;
3062         } else {
3063                 status = be_vf_eth_addr_config(adapter);
3064                 if (status)
3065                         goto err;
3066         }
3067
3068         for_all_vfs(adapter, vf_cfg, vf) {
3069                 /* Allow VFs to program MAC/VLAN filters */
3070                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3071                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3072                         status = be_cmd_set_fn_privileges(adapter,
3073                                                           privileges |
3074                                                           BE_PRIV_FILTMGMT,
3075                                                           vf + 1);
3076                         if (!status)
3077                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3078                                          vf);
3079                 }
3080
3081                 /* BE3 FW, by default, caps the VF TX-rate to 100 Mbps.
3082                  * Allow the full available bandwidth.
3083                  */
3084                 if (BE3_chip(adapter) && !old_vfs)
3085                         be_cmd_set_qos(adapter, 1000, vf+1);
3086
3087                 status = be_cmd_link_status_query(adapter, &lnk_speed,
3088                                                   NULL, vf + 1);
3089                 if (!status)
3090                         vf_cfg->tx_rate = lnk_speed;
3091
3092                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
3093                                                vf + 1, vf_cfg->if_handle, NULL);
3094                 if (status)
3095                         goto err;
3096                 vf_cfg->def_vid = def_vlan;
3097
3098                 if (!old_vfs)
3099                         be_cmd_enable_vf(adapter, vf + 1);
3100         }
3101
3102         if (!old_vfs) {
3103                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3104                 if (status) {
3105                         dev_err(dev, "SRIOV enable failed\n");
3106                         adapter->num_vfs = 0;
3107                         goto err;
3108                 }
3109         }
3110         return 0;
3111 err:
3112         dev_err(dev, "VF setup failed\n");
3113         be_vf_clear(adapter);
3114         return status;
3115 }
3116
3117 /* On BE2/BE3 chips the FW does not advertise the supported resource limits */
3118 static void BEx_get_resources(struct be_adapter *adapter,
3119                               struct be_resources *res)
3120 {
3121         struct pci_dev *pdev = adapter->pdev;
3122         bool use_sriov = false;
3123         int max_vfs;
3124
3125         max_vfs = pci_sriov_get_totalvfs(pdev);
3126
3127         if (BE3_chip(adapter) && sriov_want(adapter)) {
3128                 res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3129                 use_sriov = res->max_vfs;
3130         }
3131
3132         if (be_physfn(adapter))
3133                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3134         else
3135                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3136
3137         if (adapter->function_mode & FLEX10_MODE)
3138                 res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3139         else if (adapter->function_mode & UMC_ENABLED)
3140                 res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
3141         else
3142                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3143         res->max_mcast_mac = BE_MAX_MC;
3144
3145         /* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
3146         if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
3147             !be_physfn(adapter) || (adapter->port_num > 1))
3148                 res->max_tx_qs = 1;
3149         else
3150                 res->max_tx_qs = BE3_MAX_TX_QS;
3151
3152         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3153             !use_sriov && be_physfn(adapter))
3154                 res->max_rss_qs = (adapter->be3_native) ?
3155                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3156         res->max_rx_qs = res->max_rss_qs + 1;
3157
3158         if (be_physfn(adapter))
3159                 res->max_evt_qs = (max_vfs > 0) ?
3160                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3161         else
3162                 res->max_evt_qs = 1;
3163
3164         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3165         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3166                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3167 }
3168
3169 static void be_setup_init(struct be_adapter *adapter)
3170 {
3171         adapter->vlan_prio_bmap = 0xff;
3172         adapter->phy.link_speed = -1;
3173         adapter->if_handle = -1;
3174         adapter->be3_native = false;
3175         adapter->promiscuous = false;
3176         if (be_physfn(adapter))
3177                 adapter->cmd_privileges = MAX_PRIVILEGES;
3178         else
3179                 adapter->cmd_privileges = MIN_PRIVILEGES;
3180 }
3181
3182 static int be_get_resources(struct be_adapter *adapter)
3183 {
3184         struct device *dev = &adapter->pdev->dev;
3185         struct be_resources res = {0};
3186         int status;
3187
3188         if (BEx_chip(adapter)) {
3189                 BEx_get_resources(adapter, &res);
3190                 adapter->res = res;
3191         }
3192
3193         /* For Lancer, SH etc. read per-function resource limits from the FW.
3194          * GET_FUNC_CONFIG returns the per-function guaranteed limits.
3195          * GET_PROFILE_CONFIG returns the PCI-E related (PF-pool) limits.
3196          */
3197         if (!BEx_chip(adapter)) {
3198                 status = be_cmd_get_func_config(adapter, &res);
3199                 if (status)
3200                         return status;
3201
3202                 /* If RoCE may be enabled, stash away half the EQs for RoCE */
3203                 if (be_roce_supported(adapter))
3204                         res.max_evt_qs /= 2;
3205                 adapter->res = res;
3206
3207                 if (be_physfn(adapter)) {
3208                         status = be_cmd_get_profile_config(adapter, &res, 0);
3209                         if (status)
3210                                 return status;
3211                         adapter->res.max_vfs = res.max_vfs;
3212                 }
3213
3214                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3215                          be_max_txqs(adapter), be_max_rxqs(adapter),
3216                          be_max_rss(adapter), be_max_eqs(adapter),
3217                          be_max_vfs(adapter));
3218                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3219                          be_max_uc(adapter), be_max_mc(adapter),
3220                          be_max_vlans(adapter));
3221         }
3222
3223         return 0;
3224 }
3225
3226 /* Routine to query per function resource limits */
3227 static int be_get_config(struct be_adapter *adapter)
3228 {
3229         u16 profile_id;
3230         int status;
3231
3232         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3233                                      &adapter->function_mode,
3234                                      &adapter->function_caps,
3235                                      &adapter->asic_rev);
3236         if (status)
3237                 return status;
3238
3239         if (be_physfn(adapter)) {
3240                 status = be_cmd_get_active_profile(adapter, &profile_id);
3241                 if (!status)
3242                         dev_info(&adapter->pdev->dev,
3243                                  "Using profile 0x%x\n", profile_id);
3244         }
3245
3246         status = be_get_resources(adapter);
3247         if (status)
3248                 return status;
3249
3250         /* The primary MAC needs 1 pmac entry */
3251         adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
3252                                    GFP_KERNEL);
3253         if (!adapter->pmac_id)
3254                 return -ENOMEM;
3255
3256         /* Sanitize cfg_num_qs based on HW and platform limits */
3257         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3258
3259         return 0;
3260 }
3261
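     /* Program the interface's MAC filter.  On first setup dev_addr is zero
      * and the permanent MAC is read from the FW; on a re-setup (e.g. after
      * an HW reset) the existing dev_addr is re-programmed.
      */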
3262 static int be_mac_setup(struct be_adapter *adapter)
3263 {
3264         u8 mac[ETH_ALEN];
3265         int status;
3266
3267         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3268                 status = be_cmd_get_perm_mac(adapter, mac);
3269                 if (status)
3270                         return status;
3271
3272                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3273                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3274         } else {
3275                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3276                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3277         }
3278
3279         /* For BE3-R VFs, the PF programs the initial MAC address */
3280         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3281                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3282                                 &adapter->pmac_id[0], 0);
3283         return 0;
3284 }
3285
3286 static void be_schedule_worker(struct be_adapter *adapter)
3287 {
3288         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3289         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3290 }
3291
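     /* Create all queues in dependency order (EQs, then TX, RX-CQs and MCC
      * queues) and publish the actual RX/TX queue counts to the stack.
      * be_setup() wraps this in rtnl_lock() since updating the
      * real_num_rx/tx_queues requires it.
      */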
3292 static int be_setup_queues(struct be_adapter *adapter)
3293 {
3294         struct net_device *netdev = adapter->netdev;
3295         int status;
3296
3297         status = be_evt_queues_create(adapter);
3298         if (status)
3299                 goto err;
3300
3301         status = be_tx_qs_create(adapter);
3302         if (status)
3303                 goto err;
3304
3305         status = be_rx_cqs_create(adapter);
3306         if (status)
3307                 goto err;
3308
3309         status = be_mcc_queues_create(adapter);
3310         if (status)
3311                 goto err;
3312
3313         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3314         if (status)
3315                 goto err;
3316
3317         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3318         if (status)
3319                 goto err;
3320
3321         return 0;
3322 err:
3323         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3324         return status;
3325 }
3326
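     /* Re-create the queues with the current configuration: quiesce the
      * interface, destroy the queues, re-program MSI-X (unless vectors are
      * shared with RoCE), then re-create everything and re-open.
      */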
3327 int be_update_queues(struct be_adapter *adapter)
3328 {
3329         struct net_device *netdev = adapter->netdev;
3330         int status;
3331
3332         if (netif_running(netdev))
3333                 be_close(netdev);
3334
3335         be_cancel_worker(adapter);
3336
3337         /* If any vectors have been shared with RoCE we cannot re-program
3338          * the MSIx table.
3339          */
3340         if (!adapter->num_msix_roce_vec)
3341                 be_msix_disable(adapter);
3342
3343         be_clear_queues(adapter);
3344
3345         if (!msix_enabled(adapter)) {
3346                 status = be_msix_enable(adapter);
3347                 if (status)
3348                         return status;
3349         }
3350
3351         status = be_setup_queues(adapter);
3352         if (status)
3353                 return status;
3354
3355         be_schedule_worker(adapter);
3356
3357         if (netif_running(netdev))
3358                 status = be_open(netdev);
3359
3360         return status;
3361 }
3362
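     /* Main initialization sequence: query FW configuration and resource
      * limits, enable MSI-X, create the interface and all queues, program
      * the MAC, VLANs, rx-mode and flow-control, set up SR-IOV if requested,
      * and schedule the periodic worker.
      */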
3363 static int be_setup(struct be_adapter *adapter)
3364 {
3365         struct device *dev = &adapter->pdev->dev;
3366         u32 tx_fc, rx_fc, en_flags;
3367         int status;
3368
3369         be_setup_init(adapter);
3370
3371         if (!lancer_chip(adapter))
3372                 be_cmd_req_native_mode(adapter);
3373
3374         status = be_get_config(adapter);
3375         if (status)
3376                 goto err;
3377
3378         status = be_msix_enable(adapter);
3379         if (status)
3380                 goto err;
3381
3382         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3383                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3384         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3385                 en_flags |= BE_IF_FLAGS_RSS;
3386         en_flags = en_flags & be_if_cap_flags(adapter);
3387         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3388                                   &adapter->if_handle, 0);
3389         if (status)
3390                 goto err;
3391
3392         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3393         rtnl_lock();
3394         status = be_setup_queues(adapter);
3395         rtnl_unlock();
3396         if (status)
3397                 goto err;
3398
3399         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3400
3401         status = be_mac_setup(adapter);
3402         if (status)
3403                 goto err;
3404
3405         be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
3406
3407         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3408                 dev_err(dev, "Firmware on card is old (%s); IRQs may not work\n",
3409                         adapter->fw_ver);
3410                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3411         }
3412
3413         if (adapter->vlans_added)
3414                 be_vid_config(adapter);
3415
3416         be_set_rx_mode(adapter->netdev);
3417
3418         be_cmd_get_acpi_wol_cap(adapter);
3419
3420         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3421
3422         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3423                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3424                                         adapter->rx_fc);
3425
3426         if (sriov_want(adapter)) {
3427                 if (be_max_vfs(adapter))
3428                         be_vf_setup(adapter);
3429                 else
3430                         dev_warn(dev, "device doesn't support SRIOV\n");
3431         }
3432
3433         status = be_cmd_get_phy_info(adapter);
3434         if (!status && be_pause_supported(adapter))
3435                 adapter->phy.fc_autoneg = 1;
3436
3437         be_schedule_worker(adapter);
3438         return 0;
3439 err:
3440         be_clear(adapter);
3441         return status;
3442 }
3443
3444 #ifdef CONFIG_NET_POLL_CONTROLLER
3445 static void be_netpoll(struct net_device *netdev)
3446 {
3447         struct be_adapter *adapter = netdev_priv(netdev);
3448         struct be_eq_obj *eqo;
3449         int i;
3450
3451         for_all_evt_queues(adapter, eqo, i) {
3452                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3453                 napi_schedule(&eqo->napi);
3454         }
3457 }
3458 #endif
3459
3460 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3461 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3462
3463 static bool be_flash_redboot(struct be_adapter *adapter,
3464                         const u8 *p, u32 img_start, int image_size,
3465                         int hdr_size)
3466 {
3467         u32 crc_offset;
3468         u8 flashed_crc[4];
3469         int status;
3470
3471         crc_offset = hdr_size + img_start + image_size - 4;
3472
3473         p += crc_offset;
3474
3475         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3476                         (image_size - 4));
3477         if (status) {
3478                 dev_err(&adapter->pdev->dev,
3479                 "could not get crc from flash, not flashing redboot\n");
3480                 return false;
3481         }
3482
3483         /* update redboot only if the crc does not match */
3484         return memcmp(flashed_crc, p, 4) != 0;
3488 }
3489
3490 static bool phy_flashing_required(struct be_adapter *adapter)
3491 {
3492         return (adapter->phy.phy_type == TN_8022 &&
3493                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3494 }
3495
3496 static bool is_comp_in_ufi(struct be_adapter *adapter,
3497                            struct flash_section_info *fsec, int type)
3498 {
3499         int i = 0, img_type = 0;
3500         struct flash_section_info_g2 *fsec_g2 = NULL;
3501
3502         if (BE2_chip(adapter))
3503                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3504
3505         for (i = 0; i < MAX_FLASH_COMP; i++) {
3506                 if (fsec_g2)
3507                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3508                 else
3509                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3510
3511                 if (img_type == type)
3512                         return true;
3513         }
3514         return false;
3516 }
3517
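     /* Scan the UFI image past the file and image headers, in 32-byte steps,
      * for the flash-directory cookie; returns NULL if no valid flash
      * section is found.
      */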
3518 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3519                                          int header_size,
3520                                          const struct firmware *fw)
3521 {
3522         struct flash_section_info *fsec = NULL;
3523         const u8 *p = fw->data;
3524
3525         p += header_size;
3526         while (p < (fw->data + fw->size)) {
3527                 fsec = (struct flash_section_info *)p;
3528                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3529                         return fsec;
3530                 p += 32;
3531         }
3532         return NULL;
3533 }
3534
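     /* Write one flash component in 32KB chunks.  Every chunk except the
      * last is sent with a SAVE op; the final chunk uses a FLASH op, which
      * commits the image.  For PHY FW, an ILLEGAL_IOCTL_REQ error is
      * tolerated and flashing is silently skipped.
      */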
3535 static int be_flash(struct be_adapter *adapter, const u8 *img,
3536                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3537 {
3538         u32 total_bytes = 0, flash_op, num_bytes = 0;
3539         int status = 0;
3540         struct be_cmd_write_flashrom *req = flash_cmd->va;
3541
3542         total_bytes = img_size;
3543         while (total_bytes) {
3544                 num_bytes = min_t(u32, 32*1024, total_bytes);
3545
3546                 total_bytes -= num_bytes;
3547
3548                 if (!total_bytes) {
3549                         if (optype == OPTYPE_PHY_FW)
3550                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3551                         else
3552                                 flash_op = FLASHROM_OPER_FLASH;
3553                 } else {
3554                         if (optype == OPTYPE_PHY_FW)
3555                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3556                         else
3557                                 flash_op = FLASHROM_OPER_SAVE;
3558                 }
3559
3560                 memcpy(req->data_buf, img, num_bytes);
3561                 img += num_bytes;
3562                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3563                                                 flash_op, num_bytes);
3564                 if (status) {
3565                         if (status == ILLEGAL_IOCTL_REQ &&
3566                             optype == OPTYPE_PHY_FW)
3567                                 break;
3568                         dev_err(&adapter->pdev->dev,
3569                                 "cmd to write to flash rom failed.\n");
3570                         return status;
3571                 }
3572         }
3573         return 0;
3574 }
3575
3576 /* For BE2, BE3 and BE3-R */
3577 static int be_flash_BEx(struct be_adapter *adapter,
3578                          const struct firmware *fw,
3579                          struct be_dma_mem *flash_cmd,
3580                          int num_of_images)
3582 {
3583         int status = 0, i, filehdr_size = 0;
3584         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3585         const u8 *p = fw->data;
3586         const struct flash_comp *pflashcomp;
3587         int num_comp, redboot;
3588         struct flash_section_info *fsec = NULL;
3589
3590         struct flash_comp gen3_flash_types[] = {
3591                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3592                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3593                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3594                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3595                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3596                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3597                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3598                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3599                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3600                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3601                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3602                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3603                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3604                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3605                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3606                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3607                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3608                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3609                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3610                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3611         };
3612
3613         struct flash_comp gen2_flash_types[] = {
3614                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3615                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3616                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3617                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3618                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3619                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3620                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3621                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3622                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3623                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3624                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3625                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3626                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3627                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3628                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3629                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3630         };
3631
3632         if (BE3_chip(adapter)) {
3633                 pflashcomp = gen3_flash_types;
3634                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3635                 num_comp = ARRAY_SIZE(gen3_flash_types);
3636         } else {
3637                 pflashcomp = gen2_flash_types;
3638                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3639                 num_comp = ARRAY_SIZE(gen2_flash_types);
3640         }
3641
3642         /* Get flash section info */
3643         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3644         if (!fsec) {
3645                 dev_err(&adapter->pdev->dev,
3646                         "Invalid cookie. UFI corrupted?\n");
3647                 return -1;
3648         }
3649         for (i = 0; i < num_comp; i++) {
3650                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3651                         continue;
3652
3653                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3654                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3655                         continue;
3656
3657                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3658                     !phy_flashing_required(adapter))
3659                         continue;
3660
3661                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3662                         redboot = be_flash_redboot(adapter, fw->data,
3663                                 pflashcomp[i].offset, pflashcomp[i].size,
3664                                 filehdr_size + img_hdrs_size);
3665                         if (!redboot)
3666                                 continue;
3667                 }
3668
3669                 p = fw->data;
3670                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3671                 if (p + pflashcomp[i].size > fw->data + fw->size)
3672                         return -1;
3673
3674                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3675                                         pflashcomp[i].size);
3676                 if (status) {
3677                         dev_err(&adapter->pdev->dev,
3678                                 "Flashing section type %d failed.\n",
3679                                 pflashcomp[i].img_type);
3680                         return status;
3681                 }
3682         }
3683         return 0;
3684 }
3685
3686 static int be_flash_skyhawk(struct be_adapter *adapter,
3687                 const struct firmware *fw,
3688                 struct be_dma_mem *flash_cmd, int num_of_images)
3689 {
3690         int status = 0, i, filehdr_size = 0;
3691         int img_offset, img_size, img_optype, redboot;
3692         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3693         const u8 *p = fw->data;
3694         struct flash_section_info *fsec = NULL;
3695
3696         filehdr_size = sizeof(struct flash_file_hdr_g3);
3697         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3698         if (!fsec) {
3699                 dev_err(&adapter->pdev->dev,
3700                         "Invalid cookie. UFI corrupted?\n");
3701                 return -1;
3702         }
3703
3704         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3705                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3706                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3707
3708                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3709                 case IMAGE_FIRMWARE_iSCSI:
3710                         img_optype = OPTYPE_ISCSI_ACTIVE;
3711                         break;
3712                 case IMAGE_BOOT_CODE:
3713                         img_optype = OPTYPE_REDBOOT;
3714                         break;
3715                 case IMAGE_OPTION_ROM_ISCSI:
3716                         img_optype = OPTYPE_BIOS;
3717                         break;
3718                 case IMAGE_OPTION_ROM_PXE:
3719                         img_optype = OPTYPE_PXE_BIOS;
3720                         break;
3721                 case IMAGE_OPTION_ROM_FCoE:
3722                         img_optype = OPTYPE_FCOE_BIOS;
3723                         break;
3724                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3725                         img_optype = OPTYPE_ISCSI_BACKUP;
3726                         break;
3727                 case IMAGE_NCSI:
3728                         img_optype = OPTYPE_NCSI_FW;
3729                         break;
3730                 default:
3731                         continue;
3732                 }
3733
3734                 if (img_optype == OPTYPE_REDBOOT) {
3735                         redboot = be_flash_redboot(adapter, fw->data,
3736                                         img_offset, img_size,
3737                                         filehdr_size + img_hdrs_size);
3738                         if (!redboot)
3739                                 continue;
3740                 }
3741
3742                 p = fw->data;
3743                 p += filehdr_size + img_offset + img_hdrs_size;
3744                 if (p + img_size > fw->data + fw->size)
3745                         return -1;
3746
3747                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3748                 if (status) {
3749                         dev_err(&adapter->pdev->dev,
3750                                 "Flashing section type %d failed.\n",
3751                                 le32_to_cpu(fsec->fsec_entry[i].type));
3752                         return status;
3753                 }
3754         }
3755         return 0;
3756 }
3757
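     /* Lancer FW download: stream the image to the "/prg" object in 32KB
      * chunks via WRITE_OBJECT cmds, then issue a zero-length write to
      * commit it.  Depending on the returned change_status, the function is
      * either reset to activate the new FW or the user is told a reboot is
      * required.
      */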
3758 static int lancer_fw_download(struct be_adapter *adapter,
3759                                 const struct firmware *fw)
3760 {
3761 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3762 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3763         struct be_dma_mem flash_cmd;
3764         const u8 *data_ptr = NULL;
3765         u8 *dest_image_ptr = NULL;
3766         size_t image_size = 0;
3767         u32 chunk_size = 0;
3768         u32 data_written = 0;
3769         u32 offset = 0;
3770         int status = 0;
3771         u8 add_status = 0;
3772         u8 change_status;
3773
3774         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3775                 dev_err(&adapter->pdev->dev,
3776                         "FW image not properly aligned. "
3777                         "Length must be 4-byte aligned\n");
3778                 status = -EINVAL;
3779                 goto lancer_fw_exit;
3780         }
3781
3782         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3783                                 + LANCER_FW_DOWNLOAD_CHUNK;
3784         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3785                                           &flash_cmd.dma, GFP_KERNEL);
3786         if (!flash_cmd.va) {
3787                 status = -ENOMEM;
3788                 goto lancer_fw_exit;
3789         }
3790
3791         dest_image_ptr = flash_cmd.va +
3792                                 sizeof(struct lancer_cmd_req_write_object);
3793         image_size = fw->size;
3794         data_ptr = fw->data;
3795
3796         while (image_size) {
3797                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3798
3799                 /* Copy the image chunk content. */
3800                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3801
3802                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3803                                                  chunk_size, offset,
3804                                                  LANCER_FW_DOWNLOAD_LOCATION,
3805                                                  &data_written, &change_status,
3806                                                  &add_status);
3807                 if (status)
3808                         break;
3809
3810                 offset += data_written;
3811                 data_ptr += data_written;
3812                 image_size -= data_written;
3813         }
3814
3815         if (!status) {
3816                 /* Commit the FW written */
3817                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3818                                                  0, offset,
3819                                                  LANCER_FW_DOWNLOAD_LOCATION,
3820                                                  &data_written, &change_status,
3821                                                  &add_status);
3822         }
3823
3824         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3825                                 flash_cmd.dma);
3826         if (status) {
3827                 dev_err(&adapter->pdev->dev,
3828                         "Firmware load error. "
3829                         "Status code: 0x%x Additional Status: 0x%x\n",
3830                         status, add_status);
3831                 goto lancer_fw_exit;
3832         }
3833
3834         if (change_status == LANCER_FW_RESET_NEEDED) {
3835                 dev_info(&adapter->pdev->dev,
3836                          "Resetting adapter to activate new FW\n");
3837                 status = lancer_physdev_ctrl(adapter,
3838                                              PHYSDEV_CONTROL_FW_RESET_MASK);
3839                 if (status) {
3840                         dev_err(&adapter->pdev->dev,
3841                                 "Adapter busy for FW reset.\n"
3842                                 "New FW will not be active.\n");
3843                         goto lancer_fw_exit;
3844                 }
3845         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3846                 dev_err(&adapter->pdev->dev,
3847                         "System reboot required for new FW to be active\n");
3849         }
3850
3851         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3852 lancer_fw_exit:
3853         return status;
3854 }
3855
3856 #define UFI_TYPE2               2
3857 #define UFI_TYPE3               3
3858 #define UFI_TYPE3R              10
3859 #define UFI_TYPE4               4
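     /* Determine which ASIC family a UFI image targets, from the build
      * string and asic_type_rev in its header, and check that it matches
      * this adapter; returns -1 on a mismatch.
      */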
3860 static int be_get_ufi_type(struct be_adapter *adapter,
3861                            struct flash_file_hdr_g3 *fhdr)
3862 {
3863         if (fhdr == NULL)
3864                 goto be_get_ufi_exit;
3865
3866         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3867                 return UFI_TYPE4;
3868         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3869                 if (fhdr->asic_type_rev == 0x10)
3870                         return UFI_TYPE3R;
3871                 else
3872                         return UFI_TYPE3;
3873         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3874                 return UFI_TYPE2;
3875
3876 be_get_ufi_exit:
3877         dev_err(&adapter->pdev->dev,
3878                 "UFI and Interface are not compatible for flashing\n");
3879         return -1;
3880 }
3881
3882 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3883 {
3884         struct flash_file_hdr_g3 *fhdr3;
3885         struct image_hdr *img_hdr_ptr = NULL;
3886         struct be_dma_mem flash_cmd;
3887         const u8 *p;
3888         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3889
3890         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3891         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3892                                           &flash_cmd.dma, GFP_KERNEL);
3893         if (!flash_cmd.va) {
3894                 status = -ENOMEM;
3895                 goto be_fw_exit;
3896         }
3897
3898         p = fw->data;
3899         fhdr3 = (struct flash_file_hdr_g3 *)p;
3900
3901         ufi_type = be_get_ufi_type(adapter, fhdr3);
3902
3903         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3904         for (i = 0; i < num_imgs; i++) {
3905                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3906                                 (sizeof(struct flash_file_hdr_g3) +
3907                                  i * sizeof(struct image_hdr)));
3908                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3909                         switch (ufi_type) {
3910                         case UFI_TYPE4:
3911                                 status = be_flash_skyhawk(adapter, fw,
3912                                                         &flash_cmd, num_imgs);
3913                                 break;
3914                         case UFI_TYPE3R:
3915                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3916                                                       num_imgs);
3917                                 break;
3918                         case UFI_TYPE3:
3919                                 /* Do not flash this ufi on BE3-R cards */
3920                                 if (adapter->asic_rev < 0x10)
3921                                         status = be_flash_BEx(adapter, fw,
3922                                                               &flash_cmd,
3923                                                               num_imgs);
3924                                 else {
3925                                         status = -1;
3926                                         dev_err(&adapter->pdev->dev,
3927                                                 "Can't load BE3 UFI on BE3R\n");
3928                                 }
3929                         }
3930                 }
3931         }
3932
3933         if (ufi_type == UFI_TYPE2)
3934                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3935         else if (ufi_type == -1)
3936                 status = -1;
3937
3938         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3939                           flash_cmd.dma);
3940         if (status) {
3941                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3942                 goto be_fw_exit;
3943         }
3944
3945         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3946
3947 be_fw_exit:
3948         return status;
3949 }
3950
3951 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3952 {
3953         const struct firmware *fw;
3954         int status;
3955
3956         if (!netif_running(adapter->netdev)) {
3957                 dev_err(&adapter->pdev->dev,
3958                         "Firmware load not allowed (interface is down)\n");
3959                 return -ENETDOWN;
3960         }
3961
3962         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3963         if (status)
3964                 goto fw_exit;
3965
3966         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3967
3968         if (lancer_chip(adapter))
3969                 status = lancer_fw_download(adapter, fw);
3970         else
3971                 status = be_fw_download(adapter, fw);
3972
3973         if (!status)
3974                 be_cmd_get_fw_ver(adapter, adapter->fw_ver,
3975                                   adapter->fw_on_flash);
3976
3977 fw_exit:
3978         release_firmware(fw);
3979         return status;
3980 }
3981
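     /* ndo_bridge_setlink: parse the IFLA_AF_SPEC/IFLA_BRIDGE_MODE attribute
      * and program the e-switch into VEB or VEPA forwarding mode.  Only
      * supported when SR-IOV is enabled.
      */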
3982 static int be_ndo_bridge_setlink(struct net_device *dev,
3983                                     struct nlmsghdr *nlh)
3984 {
3985         struct be_adapter *adapter = netdev_priv(dev);
3986         struct nlattr *attr, *br_spec;
3987         int rem;
3988         int status = 0;
3989         u16 mode = 0;
3990
3991         if (!sriov_enabled(adapter))
3992                 return -EOPNOTSUPP;
3993
3994         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
             if (!br_spec)
                     return -EINVAL;
3995
3996         nla_for_each_nested(attr, br_spec, rem) {
3997                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
3998                         continue;
3999
4000                 mode = nla_get_u16(attr);
4001                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4002                         return -EINVAL;
4003
4004                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4005                                                adapter->if_handle,
4006                                                mode == BRIDGE_MODE_VEPA ?
4007                                                PORT_FWD_TYPE_VEPA :
4008                                                PORT_FWD_TYPE_VEB);
4009                 if (status)
4010                         goto err;
4011
4012                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4013                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4014
4015                 return status;
4016         }
4017 err:
4018         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4019                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4020
4021         return status;
4022 }
4023
4024 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4025                                     struct net_device *dev,
4026                                     u32 filter_mask)
4027 {
4028         struct be_adapter *adapter = netdev_priv(dev);
4029         int status = 0;
4030         u8 hsw_mode;
4031
4032         if (!sriov_enabled(adapter))
4033                 return 0;
4034
4035         /* BE and Lancer chips support VEB mode only */
4036         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4037                 hsw_mode = PORT_FWD_TYPE_VEB;
4038         } else {
4039                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4040                                                adapter->if_handle, &hsw_mode);
4041                 if (status)
4042                         return 0;
4043         }
4044
4045         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4046                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4047                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4048 }
4049
4050 static const struct net_device_ops be_netdev_ops = {
4051         .ndo_open               = be_open,
4052         .ndo_stop               = be_close,
4053         .ndo_start_xmit         = be_xmit,
4054         .ndo_set_rx_mode        = be_set_rx_mode,
4055         .ndo_set_mac_address    = be_mac_addr_set,
4056         .ndo_change_mtu         = be_change_mtu,
4057         .ndo_get_stats64        = be_get_stats64,
4058         .ndo_validate_addr      = eth_validate_addr,
4059         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4060         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4061         .ndo_set_vf_mac         = be_set_vf_mac,
4062         .ndo_set_vf_vlan        = be_set_vf_vlan,
4063         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
4064         .ndo_get_vf_config      = be_get_vf_config,
4065 #ifdef CONFIG_NET_POLL_CONTROLLER
4066         .ndo_poll_controller    = be_netpoll,
4067 #endif
4068         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4069         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4070 #ifdef CONFIG_NET_RX_BUSY_POLL
4071         .ndo_busy_poll          = be_busy_poll
4072 #endif
4073 };
4074
4075 static void be_netdev_init(struct net_device *netdev)
4076 {
4077         struct be_adapter *adapter = netdev_priv(netdev);
4078
4079         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4080                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4081                 NETIF_F_HW_VLAN_CTAG_TX;
4082         if (be_multi_rxq(adapter))
4083                 netdev->hw_features |= NETIF_F_RXHASH;
4084
4085         netdev->features |= netdev->hw_features |
4086                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4087
4088         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4089                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4090
4091         netdev->priv_flags |= IFF_UNICAST_FLT;
4092
4093         netdev->flags |= IFF_MULTICAST;
4094
4095         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4096
4097         netdev->netdev_ops = &be_netdev_ops;
4098
4099         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
4100 }
4101
4102 static void be_unmap_pci_bars(struct be_adapter *adapter)
4103 {
4104         if (adapter->csr)
4105                 pci_iounmap(adapter->pdev, adapter->csr);
4106         if (adapter->db)
4107                 pci_iounmap(adapter->pdev, adapter->db);
4108 }
4109
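     /* Doorbell BAR number: 0 on Lancer chips and for VFs, 4 otherwise */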
4110 static int db_bar(struct be_adapter *adapter)
4111 {
4112         if (lancer_chip(adapter) || !be_physfn(adapter))
4113                 return 0;
4114         else
4115                 return 4;
4116 }
4117
4118 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4119 {
4120         if (skyhawk_chip(adapter)) {
4121                 adapter->roce_db.size = 4096;
4122                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4123                                                               db_bar(adapter));
4124                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4125                                                                db_bar(adapter));
4126         }
4127         return 0;
4128 }
4129
4130 static int be_map_pci_bars(struct be_adapter *adapter)
4131 {
4132         u8 __iomem *addr;
4133
4134         if (BEx_chip(adapter) && be_physfn(adapter)) {
4135                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4136                 if (adapter->csr == NULL)
4137                         return -ENOMEM;
4138         }
4139
4140         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4141         if (addr == NULL)
4142                 goto pci_map_err;
4143         adapter->db = addr;
4144
4145         be_roce_map_pci_bars(adapter);
4146         return 0;
4147
4148 pci_map_err:
4149         be_unmap_pci_bars(adapter);
4150         return -ENOMEM;
4151 }
4152
4153 static void be_ctrl_cleanup(struct be_adapter *adapter)
4154 {
4155         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4156
4157         be_unmap_pci_bars(adapter);
4158
4159         if (mem->va)
4160                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4161                                   mem->dma);
4162
4163         mem = &adapter->rx_filter;
4164         if (mem->va)
4165                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4166                                   mem->dma);
4167 }
4168
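     /* Map the PCI BARs, allocate the mailbox DMA memory (over-allocated by
      * 16 bytes so va/dma can be aligned to a 16-byte boundary) and the
      * rx-filter command buffer, and initialize the command locks.
      */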
4169 static int be_ctrl_init(struct be_adapter *adapter)
4170 {
4171         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4172         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4173         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4174         u32 sli_intf;
4175         int status;
4176
4177         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4178         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4179                                  SLI_INTF_FAMILY_SHIFT;
4180         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4181
4182         status = be_map_pci_bars(adapter);
4183         if (status)
4184                 goto done;
4185
4186         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4187         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4188                                                 mbox_mem_alloc->size,
4189                                                 &mbox_mem_alloc->dma,
4190                                                 GFP_KERNEL);
4191         if (!mbox_mem_alloc->va) {
4192                 status = -ENOMEM;
4193                 goto unmap_pci_bars;
4194         }
4195         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4196         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4197         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4198         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4199
4200         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4201         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4202                                             rx_filter->size, &rx_filter->dma,
4203                                             GFP_KERNEL);
4204         if (rx_filter->va == NULL) {
4205                 status = -ENOMEM;
4206                 goto free_mbox;
4207         }
4208
4209         mutex_init(&adapter->mbox_lock);
4210         spin_lock_init(&adapter->mcc_lock);
4211         spin_lock_init(&adapter->mcc_cq_lock);
4212
4213         init_completion(&adapter->et_cmd_compl);
4214         pci_save_state(adapter->pdev);
4215         return 0;
4216
4217 free_mbox:
4218         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4219                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4220
4221 unmap_pci_bars:
4222         be_unmap_pci_bars(adapter);
4223
4224 done:
4225         return status;
4226 }
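
/* A minimal sketch (editorial; variable names are illustrative, not part
 * of the driver) of the 16-byte alignment trick used in be_ctrl_init():
 * the mailbox is over-allocated by the alignment, and both the CPU and
 * DMA addresses are rounded up:
 *
 *      raw = dma_alloc_coherent(dev, len + 16, &raw_dma, GFP_KERNEL);
 *      va  = PTR_ALIGN(raw, 16);        aligned virtual address
 *      da  = PTR_ALIGN(raw_dma, 16);    aligned bus address
 *
 * Because the buffer is 16 bytes larger than needed, the aligned pointers
 * always stay within the allocation.
 */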
4227
4228 static void be_stats_cleanup(struct be_adapter *adapter)
4229 {
4230         struct be_dma_mem *cmd = &adapter->stats_cmd;
4231
4232         if (cmd->va)
4233                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4234                                   cmd->va, cmd->dma);
4235 }
4236
4237 static int be_stats_init(struct be_adapter *adapter)
4238 {
4239         struct be_dma_mem *cmd = &adapter->stats_cmd;
4240
4241         if (lancer_chip(adapter))
4242                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4243         else if (BE2_chip(adapter))
4244                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4245         else if (BE3_chip(adapter))
4246                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4247         else
4248                 /* ALL non-BE ASICs */
4249                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4250
4251         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4252                                       GFP_KERNEL);
4253         if (cmd->va == NULL)
4254                 return -ENOMEM;
4255         return 0;
4256 }
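
/* Stats command sizing above, by ASIC generation:
 *
 *      Lancer            -> struct lancer_cmd_req_pport_stats
 *      BE2               -> struct be_cmd_req_get_stats_v0
 *      BE3               -> struct be_cmd_req_get_stats_v1
 *      all other ASICs   -> struct be_cmd_req_get_stats_v2
 */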
4257
4258 static void be_remove(struct pci_dev *pdev)
4259 {
4260         struct be_adapter *adapter = pci_get_drvdata(pdev);
4261
4262         if (!adapter)
4263                 return;
4264
4265         be_roce_dev_remove(adapter);
4266         be_intr_set(adapter, false);
4267
4268         cancel_delayed_work_sync(&adapter->func_recovery_work);
4269
4270         unregister_netdev(adapter->netdev);
4271
4272         be_clear(adapter);
4273
4274         /* tell fw we're done with firing cmds */
4275         be_cmd_fw_clean(adapter);
4276
4277         be_stats_cleanup(adapter);
4278
4279         be_ctrl_cleanup(adapter);
4280
4281         pci_disable_pcie_error_reporting(pdev);
4282
4283         pci_release_regions(pdev);
4284         pci_disable_device(pdev);
4285
4286         free_netdev(adapter->netdev);
4287 }
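
/* Editorial note: be_remove() unwinds in roughly the reverse order of
 * be_probe(): RoCE and the netdev first, then queue and firmware state,
 * stats and control structures, and finally the PCI device itself.
 */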
4288
4289 static int be_get_initial_config(struct be_adapter *adapter)
4290 {
4291         int status, level;
4292
4293         status = be_cmd_get_cntl_attributes(adapter);
4294         if (status)
4295                 return status;
4296
4297         /* Must be a power of 2 or else MODULO will BUG_ON */
4298         adapter->be_get_temp_freq = 64;
4299
4300         if (BEx_chip(adapter)) {
4301                 level = be_cmd_get_fw_log_level(adapter);
4302                 adapter->msg_enable =
4303                         level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4304         }
4305
4306         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4307         return 0;
4308 }
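
/* Editorial note: in kernels of this vintage,
 * netif_get_num_default_rss_queues() returns min(8, num_online_cpus()),
 * so the default queue count scales with the CPU count up to 8.
 */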
4309
4310 static int lancer_recover_func(struct be_adapter *adapter)
4311 {
4312         struct device *dev = &adapter->pdev->dev;
4313         int status;
4314
4315         status = lancer_test_and_set_rdy_state(adapter);
4316         if (status)
4317                 goto err;
4318
4319         if (netif_running(adapter->netdev))
4320                 be_close(adapter->netdev);
4321
4322         be_clear(adapter);
4323
4324         be_clear_all_error(adapter);
4325
4326         status = be_setup(adapter);
4327         if (status)
4328                 goto err;
4329
4330         if (netif_running(adapter->netdev)) {
4331                 status = be_open(adapter->netdev);
4332                 if (status)
4333                         goto err;
4334         }
4335
4336         dev_info(dev, "Adapter recovery successful\n");
4337         return 0;
4338 err:
4339         if (status == -EAGAIN)
4340                 dev_err(dev, "Waiting for resource provisioning\n");
4341         else
4342                 dev_err(dev, "Adapter recovery failed\n");
4343
4344         return status;
4345 }
4346
4347 static void be_func_recovery_task(struct work_struct *work)
4348 {
4349         struct be_adapter *adapter =
4350                 container_of(work, struct be_adapter, func_recovery_work.work);
4351         int status = 0;
4352
4353         be_detect_error(adapter);
4354
4355         if (adapter->hw_error && lancer_chip(adapter)) {
4357                 rtnl_lock();
4358                 netif_device_detach(adapter->netdev);
4359                 rtnl_unlock();
4360
4361                 status = lancer_recover_func(adapter);
4362                 if (!status)
4363                         netif_device_attach(adapter->netdev);
4364         }
4365
4366         /* On Lancer, errors other than the provisioning error (-EAGAIN)
4367          * need no further recovery attempts.
4368          */
4369         if (!status || status == -EAGAIN)
4370                 schedule_delayed_work(&adapter->func_recovery_work,
4371                                       msecs_to_jiffies(1000));
4372 }
4373
4374 static void be_worker(struct work_struct *work)
4375 {
4376         struct be_adapter *adapter =
4377                 container_of(work, struct be_adapter, work.work);
4378         struct be_rx_obj *rxo;
4379         int i;
4380
4381         /* When interrupts are not yet enabled, just reap any pending
4382          * mcc completions */
4383         if (!netif_running(adapter->netdev)) {
4384                 local_bh_disable();
4385                 be_process_mcc(adapter);
4386                 local_bh_enable();
4387                 goto reschedule;
4388         }
4389
4390         if (!adapter->stats_cmd_sent) {
4391                 if (lancer_chip(adapter))
4392                         lancer_cmd_get_pport_stats(adapter,
4393                                                    &adapter->stats_cmd);
4394                 else
4395                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4396         }
4397
4398         if (be_physfn(adapter) &&
4399             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4400                 be_cmd_get_die_temperature(adapter);
4401
4402         for_all_rx_queues(adapter, rxo, i) {
4403                 /* Replenish RX-queues starved due to memory
4404                  * allocation failures.
4405                  */
4406                 if (rxo->rx_post_starved)
4407                         be_post_rx_frags(rxo, GFP_KERNEL);
4408         }
4409
4410         be_eqd_update(adapter);
4411
4412 reschedule:
4413         adapter->work_counter++;
4414         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4415 }
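
/* Worker cadence, derived from the code above: the task re-arms itself
 * every 1000 ms, and the PF reads the die temperature once every
 * be_get_temp_freq (64) ticks, i.e. roughly once a minute.
 */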
4416
4417 /* If any VFs are already enabled, don't FLR the PF */
4418 static bool be_reset_required(struct be_adapter *adapter)
4419 {
4420         return pci_num_vf(adapter->pdev) == 0;
4421 }
4422
4423 static char *mc_name(struct be_adapter *adapter)
4424 {
4425         if (adapter->function_mode & FLEX10_MODE)
4426                 return "FLEX10";
4427         else if (adapter->function_mode & VNIC_MODE)
4428                 return "vNIC";
4429         else if (adapter->function_mode & UMC_ENABLED)
4430                 return "UMC";
4431         else
4432                 return "";
4433 }
4434
4435 static inline char *func_name(struct be_adapter *adapter)
4436 {
4437         return be_physfn(adapter) ? "PF" : "VF";
4438 }
4439
4440 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4441 {
4442         int status = 0;
4443         struct be_adapter *adapter;
4444         struct net_device *netdev;
4445         char port_name;
4446
4447         status = pci_enable_device(pdev);
4448         if (status)
4449                 goto do_none;
4450
4451         status = pci_request_regions(pdev, DRV_NAME);
4452         if (status)
4453                 goto disable_dev;
4454         pci_set_master(pdev);
4455
4456         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4457         if (netdev == NULL) {
4458                 status = -ENOMEM;
4459                 goto rel_reg;
4460         }
4461         adapter = netdev_priv(netdev);
4462         adapter->pdev = pdev;
4463         pci_set_drvdata(pdev, adapter);
4464         adapter->netdev = netdev;
4465         SET_NETDEV_DEV(netdev, &pdev->dev);
4466
4467         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4468         if (!status) {
4469                 netdev->features |= NETIF_F_HIGHDMA;
4470         } else {
4471                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4472                 if (status) {
4473                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4474                         goto free_netdev;
4475                 }
4476         }
4477
4478         if (be_physfn(adapter)) {
4479                 status = pci_enable_pcie_error_reporting(pdev);
4480                 if (!status)
4481                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4482         }
4483
4484         status = be_ctrl_init(adapter);
4485         if (status)
4486                 goto free_netdev;
4487
4488         /* sync up with fw's ready state */
4489         if (be_physfn(adapter)) {
4490                 status = be_fw_wait_ready(adapter);
4491                 if (status)
4492                         goto ctrl_clean;
4493         }
4494
4495         if (be_reset_required(adapter)) {
4496                 status = be_cmd_reset_function(adapter);
4497                 if (status)
4498                         goto ctrl_clean;
4499
4500                 /* Wait for interrupts to quiesce after an FLR */
4501                 msleep(100);
4502         }
4503
4504         /* Allow interrupts for other ULPs running on the NIC function */
4505         be_intr_set(adapter, true);
4506
4507         /* tell fw we're ready to fire cmds */
4508         status = be_cmd_fw_init(adapter);
4509         if (status)
4510                 goto ctrl_clean;
4511
4512         status = be_stats_init(adapter);
4513         if (status)
4514                 goto ctrl_clean;
4515
4516         status = be_get_initial_config(adapter);
4517         if (status)
4518                 goto stats_clean;
4519
4520         INIT_DELAYED_WORK(&adapter->work, be_worker);
4521         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4522         adapter->rx_fc = adapter->tx_fc = true;
4523
4524         status = be_setup(adapter);
4525         if (status)
4526                 goto stats_clean;
4527
4528         be_netdev_init(netdev);
4529         status = register_netdev(netdev);
4530         if (status != 0)
4531                 goto unsetup;
4532
4533         be_roce_dev_add(adapter);
4534
4535         schedule_delayed_work(&adapter->func_recovery_work,
4536                               msecs_to_jiffies(1000));
4537
4538         be_cmd_query_port_name(adapter, &port_name);
4539
4540         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4541                  func_name(adapter), mc_name(adapter), port_name);
4542
4543         return 0;
4544
4545 unsetup:
4546         be_clear(adapter);
4547 stats_clean:
4548         be_stats_cleanup(adapter);
4549 ctrl_clean:
4550         be_ctrl_cleanup(adapter);
4551 free_netdev:
4552         free_netdev(netdev);
4553 rel_reg:
4554         pci_release_regions(pdev);
4555 disable_dev:
4556         pci_disable_device(pdev);
4557 do_none:
4558         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4559         return status;
4560 }
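
/* Editorial note on the unwind ladder above: each label undoes exactly the
 * steps that succeeded before the corresponding goto, for example:
 *
 *      register_netdev() fails  -> unsetup:     be_clear()
 *      be_setup() fails         -> stats_clean: be_stats_cleanup()
 *
 * and so on down to do_none:, which only logs the failure.
 */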
4561
4562 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4563 {
4564         struct be_adapter *adapter = pci_get_drvdata(pdev);
4565         struct net_device *netdev = adapter->netdev;
4566
4567         if (adapter->wol_en)
4568                 be_setup_wol(adapter, true);
4569
4570         be_intr_set(adapter, false);
4571         cancel_delayed_work_sync(&adapter->func_recovery_work);
4572
4573         netif_device_detach(netdev);
4574         if (netif_running(netdev)) {
4575                 rtnl_lock();
4576                 be_close(netdev);
4577                 rtnl_unlock();
4578         }
4579         be_clear(adapter);
4580
4581         pci_save_state(pdev);
4582         pci_disable_device(pdev);
4583         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4584         return 0;
4585 }
4586
4587 static int be_resume(struct pci_dev *pdev)
4588 {
4589         int status = 0;
4590         struct be_adapter *adapter = pci_get_drvdata(pdev);
4591         struct net_device *netdev = adapter->netdev;
4592
4593         netif_device_detach(netdev);
4594
4595         status = pci_enable_device(pdev);
4596         if (status)
4597                 return status;
4598
4599         pci_set_power_state(pdev, PCI_D0);
4600         pci_restore_state(pdev);
4601
4602         status = be_fw_wait_ready(adapter);
4603         if (status)
4604                 return status;
4605
4606         be_intr_set(adapter, true);
4607         /* tell fw we're ready to fire cmds */
4608         status = be_cmd_fw_init(adapter);
4609         if (status)
4610                 return status;
4611
4612         be_setup(adapter);
4613         if (netif_running(netdev)) {
4614                 rtnl_lock();
4615                 be_open(netdev);
4616                 rtnl_unlock();
4617         }
4618
4619         schedule_delayed_work(&adapter->func_recovery_work,
4620                               msecs_to_jiffies(1000));
4621         netif_device_attach(netdev);
4622
4623         if (adapter->wol_en)
4624                 be_setup_wol(adapter, false);
4625
4626         return 0;
4627 }
4628
4629 /*
4630  * An FLR will stop BE from DMAing any data.
4631  */
4632 static void be_shutdown(struct pci_dev *pdev)
4633 {
4634         struct be_adapter *adapter = pci_get_drvdata(pdev);
4635
4636         if (!adapter)
4637                 return;
4638
4639         cancel_delayed_work_sync(&adapter->work);
4640         cancel_delayed_work_sync(&adapter->func_recovery_work);
4641
4642         netif_device_detach(adapter->netdev);
4643
4644         be_cmd_reset_function(adapter);
4645
4646         pci_disable_device(pdev);
4647 }
4648
4649 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4650                                 pci_channel_state_t state)
4651 {
4652         struct be_adapter *adapter = pci_get_drvdata(pdev);
4653         struct net_device *netdev = adapter->netdev;
4654
4655         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4656
4657         if (!adapter->eeh_error) {
4658                 adapter->eeh_error = true;
4659
4660                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4661
4662                 rtnl_lock();
4663                 netif_device_detach(netdev);
4664                 if (netif_running(netdev))
4665                         be_close(netdev);
4666                 rtnl_unlock();
4667
4668                 be_clear(adapter);
4669         }
4670
4671         if (state == pci_channel_io_perm_failure)
4672                 return PCI_ERS_RESULT_DISCONNECT;
4673
4674         pci_disable_device(pdev);
4675
4676         /* The error could cause the FW to trigger a flash debug dump.
4677          * Resetting the card while the flash dump is in progress can
4678          * prevent it from recovering; wait for the dump to finish.
4679          * Wait only on the first function, as the dump completes only
4680          * once per adapter.
4681          */
4682         if (pdev->devfn == 0)
4683                 ssleep(30);
4684
4685         return PCI_ERS_RESULT_NEED_RESET;
4686 }
4687
4688 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4689 {
4690         struct be_adapter *adapter = pci_get_drvdata(pdev);
4691         int status;
4692
4693         dev_info(&adapter->pdev->dev, "EEH reset\n");
4694
4695         status = pci_enable_device(pdev);
4696         if (status)
4697                 return PCI_ERS_RESULT_DISCONNECT;
4698
4699         pci_set_master(pdev);
4700         pci_set_power_state(pdev, PCI_D0);
4701         pci_restore_state(pdev);
4702
4703         /* Check if card is ok and fw is ready */
4704         dev_info(&adapter->pdev->dev,
4705                  "Waiting for FW to be ready after EEH reset\n");
4706         status = be_fw_wait_ready(adapter);
4707         if (status)
4708                 return PCI_ERS_RESULT_DISCONNECT;
4709
4710         pci_cleanup_aer_uncorrect_error_status(pdev);
4711         be_clear_all_error(adapter);
4712         return PCI_ERS_RESULT_RECOVERED;
4713 }
4714
4715 static void be_eeh_resume(struct pci_dev *pdev)
4716 {
4717         int status = 0;
4718         struct be_adapter *adapter = pci_get_drvdata(pdev);
4719         struct net_device *netdev = adapter->netdev;
4720
4721         dev_info(&adapter->pdev->dev, "EEH resume\n");
4722
4723         pci_save_state(pdev);
4724
4725         status = be_cmd_reset_function(adapter);
4726         if (status)
4727                 goto err;
4728
4729         /* tell fw we're ready to fire cmds */
4730         status = be_cmd_fw_init(adapter);
4731         if (status)
4732                 goto err;
4733
4734         status = be_setup(adapter);
4735         if (status)
4736                 goto err;
4737
4738         if (netif_running(netdev)) {
4739                 status = be_open(netdev);
4740                 if (status)
4741                         goto err;
4742         }
4743
4744         schedule_delayed_work(&adapter->func_recovery_work,
4745                               msecs_to_jiffies(1000));
4746         netif_device_attach(netdev);
4747         return;
4748 err:
4749         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4750 }
4751
4752 static const struct pci_error_handlers be_eeh_handlers = {
4753         .error_detected = be_eeh_err_detected,
4754         .slot_reset = be_eeh_reset,
4755         .resume = be_eeh_resume,
4756 };
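
/* EEH/AER recovery sequence (see Documentation/PCI/pci-error-recovery.txt):
 * the PCI core calls .error_detected first; if that returns
 * PCI_ERS_RESULT_NEED_RESET, the slot is reset and .slot_reset runs; on
 * PCI_ERS_RESULT_RECOVERED, recovery completes via .resume.
 */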
4757
4758 static struct pci_driver be_driver = {
4759         .name = DRV_NAME,
4760         .id_table = be_dev_ids,
4761         .probe = be_probe,
4762         .remove = be_remove,
4763         .suspend = be_suspend,
4764         .resume = be_resume,
4765         .shutdown = be_shutdown,
4766         .err_handler = &be_eeh_handlers
4767 };
4768
4769 static int __init be_init_module(void)
4770 {
4771         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4772             rx_frag_size != 2048) {
4773                 printk(KERN_WARNING DRV_NAME
4774                         ": Module param rx_frag_size must be 2048/4096/8192."
4775                         " Using 2048\n");
4776                 rx_frag_size = 2048;
4777         }
4778
4779         return pci_register_driver(&be_driver);
4780 }
4781 module_init(be_init_module);
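
/* Usage example (editorial; the module name is assumed from DRV_NAME):
 *
 *      # modprobe be2net rx_frag_size=4096 num_vfs=4
 *
 * An out-of-range rx_frag_size is corrected to 2048 by the check above.
 */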
4782
4783 static void __exit be_exit_module(void)
4784 {
4785         pci_unregister_driver(&be_driver);
4786 }
4787 module_exit(be_exit_module);