/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC",
        "RDMA",
        "RXF",
        "RXIPS",
        "RXULP0",
        "RXULP1",
        "RXULP2",
        "TIM",
        "TPOST",
        "TPRE",
        "TXIPS",
        "TXULP0",
        "TXULP1",
        "UC",
        "WDMA",
        "TXULP2",
        "HOST1",
        "P0_OB_LINK",
        "P1_OB_LINK",
        "HOST_GPIO",
        "MBOX",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

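/* Toggle the host-interrupt enable bit in the MEMBAR interrupt-control
 * register via PCI config space. The current state is read first so the
 * register is rewritten only when the requested state actually differs.
 */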
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

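/* Doorbell helpers: the queue id goes in the low bits of the doorbell
 * value and the count of entries posted (or events/completions popped,
 * with optional re-arm/clear-interrupt flags for EQs and CQs) in the
 * upper bits. The wmb() ensures ring entries are visible to the device
 * before it is notified.
 */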
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF,
         * so the only operation left is updating netdev->dev_addr.
         * Update it only if the user passes the same MAC that was used
         * while configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

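/* Fold a 16-bit HW counter into a 32-bit accumulator: the low 16 bits
 * track the current HW value while the high 16 bits count how many
 * times the HW counter has wrapped past 65535.
 */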
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   ((x) & 0xFFFF)
#define hi(x)                   ((x) & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

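/* Propagate link state to the net stack. On the very first call the
 * carrier is forced off (tracked via BE_FLAGS_LINK_STATUS_INIT) so that
 * a subsequent link-up is always reported to the stack.
 */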
void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

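/* Fetch the VLAN tag from the skb; if the 802.1p priority chosen by the
 * stack is not in the adapter's allowed priority bitmap, substitute the
 * FW-recommended priority.
 */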
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

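/* Reverse of wrb_fill(): convert the WRB back to CPU byte order,
 * reassemble the 64-bit DMA address and unmap the fragment. A zero
 * frag_len indicates a dummy WRB with no mapping behind it.
 */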
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

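/* Map the skb head and frags for DMA and fill one WRB per mapped piece.
 * The header WRB reserved at the queue head is written last, once the
 * total byte count is known. Returns the number of bytes mapped, or 0
 * after unwinding all mappings if a DMA mapping error occurs.
 */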
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

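/* Insert the VLAN tag into the packet data in software (see the HW csum
 * workaround in be_xmit()) and clear vlan_tci so that HW tag insertion
 * is not attempted again.
 */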
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

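/* Walk the PCI devices sharing this adapter's vendor id and count the
 * VFs whose physfn is this PF. Depending on vf_state, report either all
 * VFs found or only those currently assigned to a guest.
 */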
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

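/* Adaptive interrupt coalescing: once per second, compute the RX rate
 * of this EQ's queue in pkts/sec and map it to an EQ delay clamped to
 * [min_eqd, max_eqd] (very low rates disable the delay). The FW is
 * told only when the value changes.
 */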
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats;
        ulong now = jiffies;
        ulong delta;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        /* Dereference the rx_obj only after the bounds check above */
        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        delta = now - stats->rx_jiffies;
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

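/* Look up the page_info for a RX frag index; if this frag is the last
 * user of its DMA-mapped compound page, unmap the page. Also accounts
 * for one consumed entry in the RX queue.
 */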
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

1268 /*
1269  * skb_fill_rx_data forms a complete skb for an ether frame
1270  * indicated by rxcp.
1271  */
1272 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1273                              struct be_rx_compl_info *rxcp)
1274 {
1275         struct be_queue_info *rxq = &rxo->q;
1276         struct be_rx_page_info *page_info;
1277         u16 i, j;
1278         u16 hdr_len, curr_frag_len, remaining;
1279         u8 *start;
1280
1281         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1282         start = page_address(page_info->page) + page_info->page_offset;
1283         prefetch(start);
1284
1285         /* Copy data in the first descriptor of this completion */
1286         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1287
1288         skb->len = curr_frag_len;
1289         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1290                 memcpy(skb->data, start, curr_frag_len);
1291                 /* Complete packet has now been moved to data */
1292                 put_page(page_info->page);
1293                 skb->data_len = 0;
1294                 skb->tail += curr_frag_len;
1295         } else {
1296                 hdr_len = ETH_HLEN;
1297                 memcpy(skb->data, start, hdr_len);
1298                 skb_shinfo(skb)->nr_frags = 1;
1299                 skb_frag_set_page(skb, 0, page_info->page);
1300                 skb_shinfo(skb)->frags[0].page_offset =
1301                                         page_info->page_offset + hdr_len;
1302                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1303                 skb->data_len = curr_frag_len - hdr_len;
1304                 skb->truesize += rx_frag_size;
1305                 skb->tail += hdr_len;
1306         }
1307         page_info->page = NULL;
1308
1309         if (rxcp->pkt_size <= rx_frag_size) {
1310                 BUG_ON(rxcp->num_rcvd != 1);
1311                 return;
1312         }
1313
1314         /* More frags present for this completion */
1315         index_inc(&rxcp->rxq_idx, rxq->len);
1316         remaining = rxcp->pkt_size - curr_frag_len;
1317         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1318                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1319                 curr_frag_len = min(remaining, rx_frag_size);
1320
1321                 /* Coalesce all frags from the same physical page in one slot */
1322                 if (page_info->page_offset == 0) {
1323                         /* Fresh page */
1324                         j++;
1325                         skb_frag_set_page(skb, j, page_info->page);
1326                         skb_shinfo(skb)->frags[j].page_offset =
1327                                                         page_info->page_offset;
1328                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1329                         skb_shinfo(skb)->nr_frags++;
1330                 } else {
1331                         put_page(page_info->page);
1332                 }
1333
1334                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1335                 skb->len += curr_frag_len;
1336                 skb->data_len += curr_frag_len;
1337                 skb->truesize += rx_frag_size;
1338                 remaining -= curr_frag_len;
1339                 index_inc(&rxcp->rxq_idx, rxq->len);
1340                 page_info->page = NULL;
1341         }
1342         BUG_ON(j > MAX_SKB_FRAGS);
1343 }
1344
1345 /* Process the RX completion indicated by rxcp when GRO is disabled */
1346 static void be_rx_compl_process(struct be_rx_obj *rxo,
1347                                 struct be_rx_compl_info *rxcp)
1348 {
1349         struct be_adapter *adapter = rxo->adapter;
1350         struct net_device *netdev = adapter->netdev;
1351         struct sk_buff *skb;
1352
1353         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1354         if (unlikely(!skb)) {
1355                 rx_stats(rxo)->rx_drops_no_skbs++;
1356                 be_rx_compl_discard(rxo, rxcp);
1357                 return;
1358         }
1359
1360         skb_fill_rx_data(rxo, skb, rxcp);
1361
1362         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1363                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1364         else
1365                 skb_checksum_none_assert(skb);
1366
1367         skb->protocol = eth_type_trans(skb, netdev);
1368         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1369         if (netdev->features & NETIF_F_RXHASH)
1370                 skb->rxhash = rxcp->rss_hash;
1371
1372
1373         if (rxcp->vlanf)
1374                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1375
1376         netif_receive_skb(skb);
1377 }
1378
1379 /* Process the RX completion indicated by rxcp when GRO is enabled */
1380 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1381                              struct be_rx_compl_info *rxcp)
1382 {
1383         struct be_adapter *adapter = rxo->adapter;
1384         struct be_rx_page_info *page_info;
1385         struct sk_buff *skb = NULL;
1386         struct be_queue_info *rxq = &rxo->q;
1387         u16 remaining, curr_frag_len;
1388         u16 i, j;
1389
1390         skb = napi_get_frags(napi);
1391         if (!skb) {
1392                 be_rx_compl_discard(rxo, rxcp);
1393                 return;
1394         }
1395
1396         remaining = rxcp->pkt_size;
1397         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1398                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1399
1400                 curr_frag_len = min(remaining, rx_frag_size);
1401
1402                 /* Coalesce all frags from the same physical page in one slot */
1403                 if (i == 0 || page_info->page_offset == 0) {
1404                         /* First frag or Fresh page */
1405                         j++;
1406                         skb_frag_set_page(skb, j, page_info->page);
1407                         skb_shinfo(skb)->frags[j].page_offset =
1408                                                         page_info->page_offset;
1409                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1410                 } else {
1411                         put_page(page_info->page);
1412                 }
1413                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1414                 skb->truesize += rx_frag_size;
1415                 remaining -= curr_frag_len;
1416                 index_inc(&rxcp->rxq_idx, rxq->len);
1417                 memset(page_info, 0, sizeof(*page_info));
1418         }
1419         BUG_ON(j > MAX_SKB_FRAGS);
1420
1421         skb_shinfo(skb)->nr_frags = j + 1;
1422         skb->len = rxcp->pkt_size;
1423         skb->data_len = rxcp->pkt_size;
1424         skb->ip_summed = CHECKSUM_UNNECESSARY;
1425         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1426         if (adapter->netdev->features & NETIF_F_RXHASH)
1427                 skb->rxhash = rxcp->rss_hash;
1428
1429         if (rxcp->vlanf)
1430                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1431
1432         napi_gro_frags(napi);
1433 }
1434
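     /* Parse a v1 RX completion (used in BE3 native mode) into the sw rxcp struct */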
1435 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1436                                  struct be_rx_compl_info *rxcp)
1437 {
1438         rxcp->pkt_size =
1439                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1440         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1441         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1442         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1443         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1444         rxcp->ip_csum =
1445                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1446         rxcp->l4_csum =
1447                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1448         rxcp->ipv6 =
1449                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1450         rxcp->rxq_idx =
1451                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1452         rxcp->num_rcvd =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1454         rxcp->pkt_type =
1455                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1456         rxcp->rss_hash =
1457                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1458         if (rxcp->vlanf) {
1459                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1460                                           compl);
1461                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1462                                                compl);
1463         }
1464         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1465 }
1466
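     /* Parse a legacy v0 RX completion (non-native mode) into the sw rxcp struct */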
1467 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1468                                  struct be_rx_compl_info *rxcp)
1469 {
1470         rxcp->pkt_size =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1472         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1473         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1474         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1475         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1476         rxcp->ip_csum =
1477                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1478         rxcp->l4_csum =
1479                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1480         rxcp->ipv6 =
1481                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1482         rxcp->rxq_idx =
1483                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1484         rxcp->num_rcvd =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1486         rxcp->pkt_type =
1487                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1488         rxcp->rss_hash =
1489                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1490         if (rxcp->vlanf) {
1491                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1492                                           compl);
1493                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1494                                                compl);
1495         }
1496         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1497 }
1498
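     /* Return the next valid RX completion from the CQ (or NULL), fixing up vlan info */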
1499 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1500 {
1501         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1502         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1503         struct be_adapter *adapter = rxo->adapter;
1504
1505         /* For checking the valid bit it is OK to use either definition, as the
1506          * valid bit is at the same position in both v0 and v1 Rx compls */
1507         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1508                 return NULL;
1509
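             /* read the rest of the compl only after the valid bit has been seen */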
1510         rmb();
1511         be_dws_le_to_cpu(compl, sizeof(*compl));
1512
1513         if (adapter->be3_native)
1514                 be_parse_rx_compl_v1(compl, rxcp);
1515         else
1516                 be_parse_rx_compl_v0(compl, rxcp);
1517
1518         if (rxcp->vlanf) {
1519                 /* vlanf could be wrongly set in some cards;
1520                  * ignore it if vtm is not set */
1521                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1522                         rxcp->vlanf = 0;
1523
1524                 if (!lancer_chip(adapter))
1525                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1526
1527                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1528                     !adapter->vlan_tag[rxcp->vlan_tag])
1529                         rxcp->vlanf = 0;
1530         }
1531
1532         /* As the compl has been parsed, reset it; we won't touch it again */
1533         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1534
1535         queue_tail_inc(&rxo->cq);
1536         return rxcp;
1537 }
1538
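     /* Allocate a page of the given size; high-order allocations must be
      * compound (__GFP_COMP) so per-frag get_page/put_page refcounting works */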
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541         u32 order = get_order(size);
1542
1543         if (order > 0)
1544                 gfp |= __GFP_COMP;
1545         return alloc_pages(gfp, order);
1546 }
1547
1548 /*
1549  * Allocate a page, split it into fragments of size rx_frag_size and post
1550  * them as receive buffers to BE
1551  */
1552 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1553 {
1554         struct be_adapter *adapter = rxo->adapter;
1555         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1556         struct be_queue_info *rxq = &rxo->q;
1557         struct page *pagep = NULL;
1558         struct be_eth_rx_d *rxd;
1559         u64 page_dmaaddr = 0, frag_dmaaddr;
1560         u32 posted, page_offset = 0;
1561
1562         page_info = &rxo->page_info_tbl[rxq->head];
1563         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564                 if (!pagep) {
1565                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1566                         if (unlikely(!pagep)) {
1567                                 rx_stats(rxo)->rx_post_fail++;
1568                                 break;
1569                         }
1570                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571                                                     0, adapter->big_page_size,
1572                                                     DMA_FROM_DEVICE);
1573                         page_info->page_offset = 0;
1574                 } else {
1575                         get_page(pagep);
1576                         page_info->page_offset = page_offset + rx_frag_size;
1577                 }
1578                 page_offset = page_info->page_offset;
1579                 page_info->page = pagep;
1580                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1581                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583                 rxd = queue_head_node(rxq);
1584                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1586
1587                 /* Any space left in the current big page for another frag? */
1588                 if ((page_offset + rx_frag_size + rx_frag_size) >
1589                                         adapter->big_page_size) {
1590                         pagep = NULL;
1591                         page_info->last_page_user = true;
1592                 }
1593
1594                 prev_page_info = page_info;
1595                 queue_head_inc(rxq);
1596                 page_info = &rxo->page_info_tbl[rxq->head];
1597         }
1598         if (pagep)
1599                 prev_page_info->last_page_user = true;
1600
1601         if (posted) {
1602                 atomic_add(posted, &rxq->used);
1603                 be_rxq_notify(adapter, rxq->id, posted);
1604         } else if (atomic_read(&rxq->used) == 0) {
1605                 /* Let be_worker replenish when memory is available */
1606                 rxo->rx_post_starved = true;
1607         }
1608 }
1609
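     /* Return the next valid TX completion from the CQ, or NULL if none is pending */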
1610 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1611 {
1612         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1613
1614         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1615                 return NULL;
1616
1617         rmb();
1618         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1619
1620         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1621
1622         queue_tail_inc(tx_cq);
1623         return txcp;
1624 }
1625
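     /* Unmap and free the skb indicated by a TX completion; returns the number
      * of WRBs reclaimed, including the header WRB */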
1626 static u16 be_tx_compl_process(struct be_adapter *adapter,
1627                 struct be_tx_obj *txo, u16 last_index)
1628 {
1629         struct be_queue_info *txq = &txo->q;
1630         struct be_eth_wrb *wrb;
1631         struct sk_buff **sent_skbs = txo->sent_skb_list;
1632         struct sk_buff *sent_skb;
1633         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1634         bool unmap_skb_hdr = true;
1635
1636         sent_skb = sent_skbs[txq->tail];
1637         BUG_ON(!sent_skb);
1638         sent_skbs[txq->tail] = NULL;
1639
1640         /* skip header wrb */
1641         queue_tail_inc(txq);
1642
1643         do {
1644                 cur_index = txq->tail;
1645                 wrb = queue_tail_node(txq);
1646                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1647                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1648                 unmap_skb_hdr = false;
1649
1650                 num_wrbs++;
1651                 queue_tail_inc(txq);
1652         } while (cur_index != last_index);
1653
1654         kfree_skb(sent_skb);
1655         return num_wrbs;
1656 }
1657
1658 /* Return the number of events in the event queue, clearing each entry as it
1659  * is counted */
1659 static inline int events_get(struct be_eq_obj *eqo)
1660 {
1661         struct be_eq_entry *eqe;
1662         int num = 0;
1663
1664         do {
1665                 eqe = queue_tail_node(&eqo->q);
1666                 if (eqe->evt == 0)
1667                         break;
1668
1669                 rmb();
1670                 eqe->evt = 0;
1671                 num++;
1672                 queue_tail_inc(&eqo->q);
1673         } while (true);
1674
1675         return num;
1676 }
1677
1678 /* Leaves the EQ in a disarmed state */
1679 static void be_eq_clean(struct be_eq_obj *eqo)
1680 {
1681         int num = events_get(eqo);
1682
1683         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1684 }
1685
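     /* Drain the pending completions on the RX CQ and free any posted RX
      * buffers that the hardware never filled */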
1686 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1687 {
1688         struct be_rx_page_info *page_info;
1689         struct be_queue_info *rxq = &rxo->q;
1690         struct be_queue_info *rx_cq = &rxo->cq;
1691         struct be_rx_compl_info *rxcp;
1692         u16 tail;
1693
1694         /* First clean up pending rx completions */
1695         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1696                 be_rx_compl_discard(rxo, rxcp);
1697                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1698         }
1699
1700         /* Then free posted rx buffers that were not used */
1701         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1702         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1703                 page_info = get_rx_page_info(rxo, tail);
1704                 put_page(page_info->page);
1705                 memset(page_info, 0, sizeof(*page_info));
1706         }
1707         BUG_ON(atomic_read(&rxq->used));
1708         rxq->tail = rxq->head = 0;
1709 }
1710
1711 static void be_tx_compl_clean(struct be_adapter *adapter)
1712 {
1713         struct be_tx_obj *txo;
1714         struct be_queue_info *txq;
1715         struct be_eth_tx_compl *txcp;
1716         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1717         struct sk_buff *sent_skb;
1718         bool dummy_wrb;
1719         int i, pending_txqs;
1720
1721         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1722         do {
1723                 pending_txqs = adapter->num_tx_qs;
1724
1725                 for_all_tx_queues(adapter, txo, i) {
1726                         txq = &txo->q;
1727                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1728                                 end_idx =
1729                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1730                                                       wrb_index, txcp);
1731                                 num_wrbs += be_tx_compl_process(adapter, txo,
1732                                                                 end_idx);
1733                                 cmpl++;
1734                         }
1735                         if (cmpl) {
1736                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1737                                 atomic_sub(num_wrbs, &txq->used);
1738                                 cmpl = 0;
1739                                 num_wrbs = 0;
1740                         }
1741                         if (atomic_read(&txq->used) == 0)
1742                                 pending_txqs--;
1743                 }
1744
1745                 if (pending_txqs == 0 || ++timeo > 200)
1746                         break;
1747
1748                 mdelay(1);
1749         } while (true);
1750
1751         for_all_tx_queues(adapter, txo, i) {
1752                 txq = &txo->q;
1753                 if (atomic_read(&txq->used))
1754                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1755                                 atomic_read(&txq->used));
1756
1757                 /* free posted tx for which compls will never arrive */
1758                 while (atomic_read(&txq->used)) {
1759                         sent_skb = txo->sent_skb_list[txq->tail];
1760                         end_idx = txq->tail;
1761                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1762                                                    &dummy_wrb);
1763                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1764                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1765                         atomic_sub(num_wrbs, &txq->used);
1766                 }
1767         }
1768 }
1769
1770 static void be_evt_queues_destroy(struct be_adapter *adapter)
1771 {
1772         struct be_eq_obj *eqo;
1773         int i;
1774
1775         for_all_evt_queues(adapter, eqo, i) {
1776                 if (eqo->q.created) {
1777                         be_eq_clean(eqo);
1778                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1779                 }
1780                 be_queue_free(adapter, &eqo->q);
1781         }
1782 }
1783
1784 static int be_evt_queues_create(struct be_adapter *adapter)
1785 {
1786         struct be_queue_info *eq;
1787         struct be_eq_obj *eqo;
1788         int i, rc;
1789
1790         adapter->num_evt_qs = num_irqs(adapter);
1791
1792         for_all_evt_queues(adapter, eqo, i) {
1793                 eqo->adapter = adapter;
1794                 eqo->tx_budget = BE_TX_BUDGET;
1795                 eqo->idx = i;
1796                 eqo->max_eqd = BE_MAX_EQD;
1797                 eqo->enable_aic = true;
1798
1799                 eq = &eqo->q;
1800                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1801                                         sizeof(struct be_eq_entry));
1802                 if (rc)
1803                         return rc;
1804
1805                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1806                 if (rc)
1807                         return rc;
1808         }
1809         return 0;
1810 }
1811
1812 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1813 {
1814         struct be_queue_info *q;
1815
1816         q = &adapter->mcc_obj.q;
1817         if (q->created)
1818                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1819         be_queue_free(adapter, q);
1820
1821         q = &adapter->mcc_obj.cq;
1822         if (q->created)
1823                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1824         be_queue_free(adapter, q);
1825 }
1826
1827 /* Must be called only after TX qs are created as MCC shares TX EQ */
1828 static int be_mcc_queues_create(struct be_adapter *adapter)
1829 {
1830         struct be_queue_info *q, *cq;
1831
1832         cq = &adapter->mcc_obj.cq;
1833         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1834                         sizeof(struct be_mcc_compl)))
1835                 goto err;
1836
1837         /* Use the default EQ for MCC completions */
1838         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1839                 goto mcc_cq_free;
1840
1841         q = &adapter->mcc_obj.q;
1842         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1843                 goto mcc_cq_destroy;
1844
1845         if (be_cmd_mccq_create(adapter, q, cq))
1846                 goto mcc_q_free;
1847
1848         return 0;
1849
1850 mcc_q_free:
1851         be_queue_free(adapter, q);
1852 mcc_cq_destroy:
1853         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1854 mcc_cq_free:
1855         be_queue_free(adapter, cq);
1856 err:
1857         return -1;
1858 }
1859
1860 static void be_tx_queues_destroy(struct be_adapter *adapter)
1861 {
1862         struct be_queue_info *q;
1863         struct be_tx_obj *txo;
1864         u8 i;
1865
1866         for_all_tx_queues(adapter, txo, i) {
1867                 q = &txo->q;
1868                 if (q->created)
1869                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1870                 be_queue_free(adapter, q);
1871
1872                 q = &txo->cq;
1873                 if (q->created)
1874                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1875                 be_queue_free(adapter, q);
1876         }
1877 }
1878
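     /* Use a single TXQ on BE2, in multi-channel configs, on BE VFs and on
      * BE PFs where SR-IOV is desired; else use all the TXQs the HW allows */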
1879 static int be_num_txqs_want(struct be_adapter *adapter)
1880 {
1881         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1882             be_is_mc(adapter) ||
1883             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1884             BE2_chip(adapter))
1885                 return 1;
1886         else
1887                 return adapter->max_tx_queues;
1888 }
1889
1890 static int be_tx_cqs_create(struct be_adapter *adapter)
1891 {
1892         struct be_queue_info *cq, *eq;
1893         int status;
1894         struct be_tx_obj *txo;
1895         u8 i;
1896
1897         adapter->num_tx_qs = be_num_txqs_want(adapter);
1898         if (adapter->num_tx_qs != MAX_TX_QS) {
1899                 rtnl_lock();
1900                 netif_set_real_num_tx_queues(adapter->netdev,
1901                         adapter->num_tx_qs);
1902                 rtnl_unlock();
1903         }
1904
1905         for_all_tx_queues(adapter, txo, i) {
1906                 cq = &txo->cq;
1907                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1908                                         sizeof(struct be_eth_tx_compl));
1909                 if (status)
1910                         return status;
1911
1912                 /* If num_evt_qs is less than num_tx_qs, then more than
1913                  * one txq shares an eq
1914                  */
1915                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1916                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1917                 if (status)
1918                         return status;
1919         }
1920         return 0;
1921 }
1922
1923 static int be_tx_qs_create(struct be_adapter *adapter)
1924 {
1925         struct be_tx_obj *txo;
1926         int i, status;
1927
1928         for_all_tx_queues(adapter, txo, i) {
1929                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1930                                         sizeof(struct be_eth_wrb));
1931                 if (status)
1932                         return status;
1933
1934                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1935                 if (status)
1936                         return status;
1937         }
1938
1939         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1940                  adapter->num_tx_qs);
1941         return 0;
1942 }
1943
1944 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1945 {
1946         struct be_queue_info *q;
1947         struct be_rx_obj *rxo;
1948         int i;
1949
1950         for_all_rx_queues(adapter, rxo, i) {
1951                 q = &rxo->cq;
1952                 if (q->created)
1953                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1954                 be_queue_free(adapter, q);
1955         }
1956 }
1957
1958 static int be_rx_cqs_create(struct be_adapter *adapter)
1959 {
1960         struct be_queue_info *eq, *cq;
1961         struct be_rx_obj *rxo;
1962         int rc, i;
1963
1964         /* We'll create as many RSS rings as there are irqs.
1965          * But when there's only one irq, there's no use creating RSS rings.
1966          */
1967         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1968                                 num_irqs(adapter) + 1 : 1;
1969         if (adapter->num_rx_qs != MAX_RX_QS) {
1970                 rtnl_lock();
1971                 netif_set_real_num_rx_queues(adapter->netdev,
1972                                              adapter->num_rx_qs);
1973                 rtnl_unlock();
1974         }
1975
1976         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1977         for_all_rx_queues(adapter, rxo, i) {
1978                 rxo->adapter = adapter;
1979                 cq = &rxo->cq;
1980                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1981                                 sizeof(struct be_eth_rx_compl));
1982                 if (rc)
1983                         return rc;
1984
1985                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1986                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1987                 if (rc)
1988                         return rc;
1989         }
1990
1991         dev_info(&adapter->pdev->dev,
1992                  "created %d RSS queue(s) and 1 default RX queue\n",
1993                  adapter->num_rx_qs - 1);
1994         return 0;
1995 }
1996
1997 static irqreturn_t be_intx(int irq, void *dev)
1998 {
1999         struct be_eq_obj *eqo = dev;
2000         struct be_adapter *adapter = eqo->adapter;
2001         int num_evts = 0;
2002
2003         /* On Lancer, the clear-intr bit of the EQ DB does not work.
2004          * INTx is de-asserted only on notifying num evts.
2005          */
2006         if (lancer_chip(adapter))
2007                 num_evts = events_get(eqo);
2008
2009         /* The EQ-notify may not de-assert INTx right away, causing
2010          * the ISR to be invoked again. So, return HANDLED even when
2011          * num_evts is zero.
2012          */
2013         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2014         napi_schedule(&eqo->napi);
2015         return IRQ_HANDLED;
2016 }
2017
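     /* MSI-x handler: ack the interrupt on the EQ doorbell and defer all work to NAPI */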
2018 static irqreturn_t be_msix(int irq, void *dev)
2019 {
2020         struct be_eq_obj *eqo = dev;
2021
2022         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2023         napi_schedule(&eqo->napi);
2024         return IRQ_HANDLED;
2025 }
2026
2027 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2028 {
2029         return rxcp->tcpf && !rxcp->err;
2030 }
2031
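     /* Process up to budget RX completions on this RX queue; returns the
      * number consumed */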
2032 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2033                         int budget)
2034 {
2035         struct be_adapter *adapter = rxo->adapter;
2036         struct be_queue_info *rx_cq = &rxo->cq;
2037         struct be_rx_compl_info *rxcp;
2038         u32 work_done;
2039
2040         for (work_done = 0; work_done < budget; work_done++) {
2041                 rxcp = be_rx_compl_get(rxo);
2042                 if (!rxcp)
2043                         break;
2044
2045                 /* Is it a flush compl that has no data? */
2046                 if (unlikely(rxcp->num_rcvd == 0))
2047                         goto loop_continue;
2048
2049                 /* Discard a compl with partial DMA (Lancer B0) */
2050                 if (unlikely(!rxcp->pkt_size)) {
2051                         be_rx_compl_discard(rxo, rxcp);
2052                         goto loop_continue;
2053                 }
2054
2055                 /* On BE, drop pkts that arrive due to imperfect filtering in
2056                  * promiscuous mode on some SKUs
2057                  */
2058                 if (unlikely(rxcp->port != adapter->port_num &&
2059                                 !lancer_chip(adapter))) {
2060                         be_rx_compl_discard(rxo, rxcp);
2061                         goto loop_continue;
2062                 }
2063
2064                 if (do_gro(rxcp))
2065                         be_rx_compl_process_gro(rxo, napi, rxcp);
2066                 else
2067                         be_rx_compl_process(rxo, rxcp);
2068 loop_continue:
2069                 be_rx_stats_update(rxo, rxcp);
2070         }
2071
2072         if (work_done) {
2073                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2074
2075                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2076                         be_post_rx_frags(rxo, GFP_ATOMIC);
2077         }
2078
2079         return work_done;
2080 }
2081
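     /* Reap up to budget TX completions on this TX queue; returns true if
      * fewer than budget completions were found, i.e. TX work is done */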
2082 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2083                           int budget, int idx)
2084 {
2085         struct be_eth_tx_compl *txcp;
2086         int num_wrbs = 0, work_done;
2087
2088         for (work_done = 0; work_done < budget; work_done++) {
2089                 txcp = be_tx_compl_get(&txo->cq);
2090                 if (!txcp)
2091                         break;
2092                 num_wrbs += be_tx_compl_process(adapter, txo,
2093                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2094                                         wrb_index, txcp));
2095         }
2096
2097         if (work_done) {
2098                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2099                 atomic_sub(num_wrbs, &txo->q.used);
2100
2101                 /* As Tx wrbs have been freed up, wake up netdev queue
2102                  * if it was stopped due to lack of tx wrbs.  */
2103                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2104                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2105                         netif_wake_subqueue(adapter->netdev, idx);
2106                 }
2107
2108                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2109                 tx_stats(txo)->tx_compl += work_done;
2110                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2111         }
2112         return (work_done < budget); /* Done */
2113 }
2114
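     /* NAPI poll handler: services every TXQ and RXQ mapped to this EQ and,
      * on the MCC EQ, the MCC completions as well */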
2115 int be_poll(struct napi_struct *napi, int budget)
2116 {
2117         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2118         struct be_adapter *adapter = eqo->adapter;
2119         int max_work = 0, work, i, num_evts;
2120         bool tx_done;
2121
2122         num_evts = events_get(eqo);
2123
2124         /* Process all TXQs serviced by this EQ */
2125         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2126                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2127                                         eqo->tx_budget, i);
2128                 if (!tx_done)
2129                         max_work = budget;
2130         }
2131
2132         /* This loop will iterate twice for EQ0, in which
2133          * completions of the last RXQ (the default one) are also processed.
2134          * For other EQs the loop iterates only once.
2135          */
2136         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2137                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2138                 max_work = max(work, max_work);
2139         }
2140
2141         if (is_mcc_eqo(eqo))
2142                 be_process_mcc(adapter);
2143
2144         if (max_work < budget) {
2145                 napi_complete(napi);
2146                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2147         } else {
2148                 /* As we'll continue in polling mode, count and clear events */
2149                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2150         }
2151         return max_work;
2152 }
2153
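     /* Check the SLIPORT status registers (Lancer) or the UE status
      * registers (BE) for unrecoverable HW errors */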
2154 void be_detect_error(struct be_adapter *adapter)
2155 {
2156         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2157         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2158         u32 i;
2159
2160         if (be_crit_error(adapter))
2161                 return;
2162
2163         if (lancer_chip(adapter)) {
2164                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2165                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2166                         sliport_err1 = ioread32(adapter->db +
2167                                         SLIPORT_ERROR1_OFFSET);
2168                         sliport_err2 = ioread32(adapter->db +
2169                                         SLIPORT_ERROR2_OFFSET);
2170                 }
2171         } else {
2172                 pci_read_config_dword(adapter->pdev,
2173                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2174                 pci_read_config_dword(adapter->pdev,
2175                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2176                 pci_read_config_dword(adapter->pdev,
2177                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2178                 pci_read_config_dword(adapter->pdev,
2179                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2180
2181                 ue_lo = (ue_lo & ~ue_lo_mask);
2182                 ue_hi = (ue_hi & ~ue_hi_mask);
2183         }
2184
2185         /* On certain platforms BE hardware can indicate spurious UEs.
2186          * Allow the h/w to stop working completely in case of a real UE;
2187          * hence hw_error is not set on UE detection.
2188          */
2189         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2190                 adapter->hw_error = true;
2191                 dev_err(&adapter->pdev->dev,
2192                         "Error detected in the card\n");
2196                 dev_err(&adapter->pdev->dev,
2197                         "ERR: sliport status 0x%x\n", sliport_status);
2198                 dev_err(&adapter->pdev->dev,
2199                         "ERR: sliport error1 0x%x\n", sliport_err1);
2200                 dev_err(&adapter->pdev->dev,
2201                         "ERR: sliport error2 0x%x\n", sliport_err2);
2202         }
2203
2204         if (ue_lo) {
2205                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2206                         if (ue_lo & 1)
2207                                 dev_err(&adapter->pdev->dev,
2208                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2209                 }
2210         }
2211
2212         if (ue_hi) {
2213                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2214                         if (ue_hi & 1)
2215                                 dev_err(&adapter->pdev->dev,
2216                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2217                 }
2218         }
2220 }
2221
2222 static void be_msix_disable(struct be_adapter *adapter)
2223 {
2224         if (msix_enabled(adapter)) {
2225                 pci_disable_msix(adapter->pdev);
2226                 adapter->num_msix_vec = 0;
2227         }
2228 }
2229
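     /* Number of RSS rings wanted: requires the RSS capability (and, on BE,
      * a PF with no SR-IOV); capped by the kernel's default RSS queue count */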
2230 static uint be_num_rss_want(struct be_adapter *adapter)
2231 {
2232         u32 num = 0;
2233
2234         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2235             (lancer_chip(adapter) ||
2236              (!sriov_want(adapter) && be_physfn(adapter)))) {
2237                 num = adapter->max_rss_queues;
2238                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2239         }
2240         return num;
2241 }
2242
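     /* Request one MSI-x vector per desired EQ (RSS and, if supported, RoCE);
      * on partial allocation, retry with the count the OS can grant */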
2243 static void be_msix_enable(struct be_adapter *adapter)
2244 {
2245 #define BE_MIN_MSIX_VECTORS             1
2246         int i, status, num_vec, num_roce_vec = 0;
2247         struct device *dev = &adapter->pdev->dev;
2248
2249         /* If RSS queues are not used, need a vec for default RX Q */
2250         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2251         if (be_roce_supported(adapter)) {
2252                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2253                                         (num_online_cpus() + 1));
2254                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2255                 num_vec += num_roce_vec;
2256                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2257         }
2258         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2259
2260         for (i = 0; i < num_vec; i++)
2261                 adapter->msix_entries[i].entry = i;
2262
2263         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2264         if (status == 0) {
2265                 goto done;
2266         } else if (status >= BE_MIN_MSIX_VECTORS) {
2267                 num_vec = status;
2268                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2269                                 num_vec) == 0)
2270                         goto done;
2271         }
2272
2273         dev_warn(dev, "MSIx enable failed\n");
2274         return;
2275 done:
2276         if (be_roce_supported(adapter)) {
2277                 if (num_vec > num_roce_vec) {
2278                         adapter->num_msix_vec = num_vec - num_roce_vec;
2279                         adapter->num_msix_roce_vec =
2280                                 num_vec - adapter->num_msix_vec;
2281                 } else {
2282                         adapter->num_msix_vec = num_vec;
2283                         adapter->num_msix_roce_vec = 0;
2284                 }
2285         } else {
2286                 adapter->num_msix_vec = num_vec;
2287         }
2288         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2289 }
2290
2291 static inline int be_msix_vec_get(struct be_adapter *adapter,
2292                                 struct be_eq_obj *eqo)
2293 {
2294         return adapter->msix_entries[eqo->idx].vector;
2295 }
2296
2297 static int be_msix_register(struct be_adapter *adapter)
2298 {
2299         struct net_device *netdev = adapter->netdev;
2300         struct be_eq_obj *eqo;
2301         int status, i, vec;
2302
2303         for_all_evt_queues(adapter, eqo, i) {
2304                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2305                 vec = be_msix_vec_get(adapter, eqo);
2306                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2307                 if (status)
2308                         goto err_msix;
2309         }
2310
2311         return 0;
2312 err_msix:
2313         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2314                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2315         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2316                 status);
2317         be_msix_disable(adapter);
2318         return status;
2319 }
2320
2321 static int be_irq_register(struct be_adapter *adapter)
2322 {
2323         struct net_device *netdev = adapter->netdev;
2324         int status;
2325
2326         if (msix_enabled(adapter)) {
2327                 status = be_msix_register(adapter);
2328                 if (status == 0)
2329                         goto done;
2330                 /* INTx is not supported for VF */
2331                 if (!be_physfn(adapter))
2332                         return status;
2333         }
2334
2335         /* INTx: only the first EQ is used */
2336         netdev->irq = adapter->pdev->irq;
2337         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2338                              &adapter->eq_obj[0]);
2339         if (status) {
2340                 dev_err(&adapter->pdev->dev,
2341                         "INTx request IRQ failed - err %d\n", status);
2342                 return status;
2343         }
2344 done:
2345         adapter->isr_registered = true;
2346         return 0;
2347 }
2348
2349 static void be_irq_unregister(struct be_adapter *adapter)
2350 {
2351         struct net_device *netdev = adapter->netdev;
2352         struct be_eq_obj *eqo;
2353         int i;
2354
2355         if (!adapter->isr_registered)
2356                 return;
2357
2358         /* INTx */
2359         if (!msix_enabled(adapter)) {
2360                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2361                 goto done;
2362         }
2363
2364         /* MSIx */
2365         for_all_evt_queues(adapter, eqo, i)
2366                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2367
2368 done:
2369         adapter->isr_registered = false;
2370 }
2371
2372 static void be_rx_qs_destroy(struct be_adapter *adapter)
2373 {
2374         struct be_queue_info *q;
2375         struct be_rx_obj *rxo;
2376         int i;
2377
2378         for_all_rx_queues(adapter, rxo, i) {
2379                 q = &rxo->q;
2380                 if (q->created) {
2381                         be_cmd_rxq_destroy(adapter, q);
2382                         /* After the rxq is invalidated, wait for a grace time
2383                          * of 1ms for all dma to end and the flush compl to
2384                          * arrive
2385                          */
2386                         mdelay(1);
2387                         be_rx_cq_clean(rxo);
2388                 }
2389                 be_queue_free(adapter, q);
2390         }
2391 }
2392
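     /* Quiesce the device: disable interrupts and NAPI, drain TX completions
      * and destroy the RX queues */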
2393 static int be_close(struct net_device *netdev)
2394 {
2395         struct be_adapter *adapter = netdev_priv(netdev);
2396         struct be_eq_obj *eqo;
2397         int i;
2398
2399         be_roce_dev_close(adapter);
2400
2401         be_async_mcc_disable(adapter);
2402
2403         if (!lancer_chip(adapter))
2404                 be_intr_set(adapter, false);
2405
2406         for_all_evt_queues(adapter, eqo, i) {
2407                 napi_disable(&eqo->napi);
2408                 if (msix_enabled(adapter))
2409                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2410                 else
2411                         synchronize_irq(netdev->irq);
2412                 be_eq_clean(eqo);
2413         }
2414
2415         be_irq_unregister(adapter);
2416
2417         /* Wait for all pending tx completions to arrive so that
2418          * all tx skbs are freed.
2419          */
2420         be_tx_compl_clean(adapter);
2421
2422         be_rx_qs_destroy(adapter);
2423         return 0;
2424 }
2425
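     /* Create the default RXQ and the RSS rings, program the RSS table and
      * post the initial receive buffers */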
2426 static int be_rx_qs_create(struct be_adapter *adapter)
2427 {
2428         struct be_rx_obj *rxo;
2429         int rc, i, j;
2430         u8 rsstable[128];
2431
2432         for_all_rx_queues(adapter, rxo, i) {
2433                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2434                                     sizeof(struct be_eth_rx_d));
2435                 if (rc)
2436                         return rc;
2437         }
2438
2439         /* The FW would like the default RXQ to be created first */
2440         rxo = default_rxo(adapter);
2441         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2442                                adapter->if_handle, false, &rxo->rss_id);
2443         if (rc)
2444                 return rc;
2445
2446         for_all_rss_queues(adapter, rxo, i) {
2447                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2448                                        rx_frag_size, adapter->if_handle,
2449                                        true, &rxo->rss_id);
2450                 if (rc)
2451                         return rc;
2452         }
2453
2454         if (be_multi_rxq(adapter)) {
2455                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2456                         for_all_rss_queues(adapter, rxo, i) {
2457                                 if ((j + i) >= 128)
2458                                         break;
2459                                 rsstable[j + i] = rxo->rss_id;
2460                         }
2461                 }
2462                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2463                 if (rc)
2464                         return rc;
2465         }
2466
2467         /* First time posting */
2468         for_all_rx_queues(adapter, rxo, i)
2469                 be_post_rx_frags(rxo, GFP_KERNEL);
2470         return 0;
2471 }
2472
2473 static int be_open(struct net_device *netdev)
2474 {
2475         struct be_adapter *adapter = netdev_priv(netdev);
2476         struct be_eq_obj *eqo;
2477         struct be_rx_obj *rxo;
2478         struct be_tx_obj *txo;
2479         u8 link_status;
2480         int status, i;
2481
2482         status = be_rx_qs_create(adapter);
2483         if (status)
2484                 goto err;
2485
2486         be_irq_register(adapter);
2487
2488         if (!lancer_chip(adapter))
2489                 be_intr_set(adapter, true);
2490
2491         for_all_rx_queues(adapter, rxo, i)
2492                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2493
2494         for_all_tx_queues(adapter, txo, i)
2495                 be_cq_notify(adapter, txo->cq.id, true, 0);
2496
2497         be_async_mcc_enable(adapter);
2498
2499         for_all_evt_queues(adapter, eqo, i) {
2500                 napi_enable(&eqo->napi);
2501                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2502         }
2503
2504         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2505         if (!status)
2506                 be_link_status_update(adapter, link_status);
2507
2508         be_roce_dev_open(adapter);
2509         return 0;
2510 err:
2511         be_close(adapter->netdev);
2512         return -EIO;
2513 }
2514
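     /* Configure magic-packet Wake-on-LAN: program the port MAC to enable it,
      * or a zero MAC to disable it, and set the PCI wake state to match */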
2515 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2516 {
2517         struct be_dma_mem cmd;
2518         int status = 0;
2519         u8 mac[ETH_ALEN];
2520
2521         memset(mac, 0, ETH_ALEN);
2522
2523         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2524         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2525                                     GFP_KERNEL);
2526         if (cmd.va == NULL)
2527                 return -1;
2528         memset(cmd.va, 0, cmd.size);
2529
2530         if (enable) {
2531                 status = pci_write_config_dword(adapter->pdev,
2532                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2533                 if (status) {
2534                         dev_err(&adapter->pdev->dev,
2535                                 "Could not enable Wake-on-LAN\n");
2536                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2537                                           cmd.dma);
2538                         return status;
2539                 }
2540                 status = be_cmd_enable_magic_wol(adapter,
2541                                 adapter->netdev->dev_addr, &cmd);
2542                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2543                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2544         } else {
2545                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2546                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2547                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2548         }
2549
2550         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2551         return status;
2552 }
2553
2554 /*
2555  * Generate a seed MAC address from the PF MAC address using jhash.
2556  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2557  * These addresses are programmed into the ASIC by the PF, and the VF
2558  * driver queries for its MAC address during probe.
2559  */
2560 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2561 {
2562         u32 vf;
2563         int status = 0;
2564         u8 mac[ETH_ALEN];
2565         struct be_vf_cfg *vf_cfg;
2566
2567         be_vf_eth_addr_generate(adapter, mac);
2568
2569         for_all_vfs(adapter, vf_cfg, vf) {
2570                 if (lancer_chip(adapter)) {
2571                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2572                 } else {
2573                         status = be_cmd_pmac_add(adapter, mac,
2574                                                  vf_cfg->if_handle,
2575                                                  &vf_cfg->pmac_id, vf + 1);
2576                 }
2577
2578                 if (status)
2579                         dev_err(&adapter->pdev->dev,
2580                         "Mac address assignment failed for VF %d\n", vf);
2581                 else
2582                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2583
2584                 mac[5] += 1;
2585         }
2586         return status;
2587 }
2588
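     /* Undo VF setup: release each VF's MAC and interface and disable SR-IOV,
      * unless the VFs are still assigned to guests */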
2589 static void be_vf_clear(struct be_adapter *adapter)
2590 {
2591         struct be_vf_cfg *vf_cfg;
2592         u32 vf;
2593
2594         if (be_find_vfs(adapter, ASSIGNED)) {
2595                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2596                 goto done;
2597         }
2598
2599         for_all_vfs(adapter, vf_cfg, vf) {
2600                 if (lancer_chip(adapter))
2601                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2602                 else
2603                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2604                                         vf_cfg->pmac_id, vf + 1);
2605
2606                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2607         }
2608         pci_disable_sriov(adapter->pdev);
2609 done:
2610         kfree(adapter->vf_cfg);
2611         adapter->num_vfs = 0;
2612 }
2613
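     /* Teardown counterpart of be_setup(): releases VFs, UC MACs, the
      * interface and all queues */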
2614 static int be_clear(struct be_adapter *adapter)
2615 {
2616         int i = 1;
2617
2618         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2619                 cancel_delayed_work_sync(&adapter->work);
2620                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2621         }
2622
2623         if (sriov_enabled(adapter))
2624                 be_vf_clear(adapter);
2625
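             /* pmac_id[0] holds the primary MAC; i starts at 1 so only the
              * additional UC MACs are deleted */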
2626         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2627                 be_cmd_pmac_del(adapter, adapter->if_handle,
2628                         adapter->pmac_id[i], 0);
2629
2630         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2631
2632         be_mcc_queues_destroy(adapter);
2633         be_rx_cqs_destroy(adapter);
2634         be_tx_queues_destroy(adapter);
2635         be_evt_queues_destroy(adapter);
2636
2637         kfree(adapter->pmac_id);
2638         adapter->pmac_id = NULL;
2639
2640         be_msix_disable(adapter);
2641         return 0;
2642 }
2643
2644 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2645                                    u32 *cap_flags, u8 domain)
2646 {
2647         bool profile_present = false;
2648         int status;
2649
2650         if (lancer_chip(adapter)) {
2651                 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2652                 if (!status)
2653                         profile_present = true;
2654         }
2655
2656         if (!profile_present)
2657                 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2658                              BE_IF_FLAGS_MULTICAST;
2659 }
2660
2661 static int be_vf_setup_init(struct be_adapter *adapter)
2662 {
2663         struct be_vf_cfg *vf_cfg;
2664         int vf;
2665
2666         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2667                                   GFP_KERNEL);
2668         if (!adapter->vf_cfg)
2669                 return -ENOMEM;
2670
2671         for_all_vfs(adapter, vf_cfg, vf) {
2672                 vf_cfg->if_handle = -1;
2673                 vf_cfg->pmac_id = -1;
2674         }
2675         return 0;
2676 }
2677
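     /* Enable SR-IOV and create an interface, MAC address and QoS settings
      * for each VF */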
2678 static int be_vf_setup(struct be_adapter *adapter)
2679 {
2680         struct be_vf_cfg *vf_cfg;
2681         struct device *dev = &adapter->pdev->dev;
2682         u32 cap_flags, en_flags, vf;
2683         u16 def_vlan, lnk_speed;
2684         int status, enabled_vfs;
2685
2686         enabled_vfs = be_find_vfs(adapter, ENABLED);
2687         if (enabled_vfs) {
2688                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2689                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2690                 return 0;
2691         }
2692
2693         if (num_vfs > adapter->dev_num_vfs) {
2694                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2695                          adapter->dev_num_vfs, num_vfs);
2696                 num_vfs = adapter->dev_num_vfs;
2697         }
2698
2699         status = pci_enable_sriov(adapter->pdev, num_vfs);
2700         if (!status) {
2701                 adapter->num_vfs = num_vfs;
2702         } else {
2703                 /* Platform doesn't support SRIOV though device supports it */
2704                 dev_warn(dev, "SRIOV enable failed\n");
2705                 return 0;
2706         }
2707
2708         status = be_vf_setup_init(adapter);
2709         if (status)
2710                 goto err;
2711
2712         for_all_vfs(adapter, vf_cfg, vf) {
2713                 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2714
2715                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2716                                         BE_IF_FLAGS_BROADCAST |
2717                                         BE_IF_FLAGS_MULTICAST);
2718
2719                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2720                                           &vf_cfg->if_handle, vf + 1);
2721                 if (status)
2722                         goto err;
2723         }
2724
2725         if (!enabled_vfs) {
2726                 status = be_vf_eth_addr_config(adapter);
2727                 if (status)
2728                         goto err;
2729         }
2730
2731         for_all_vfs(adapter, vf_cfg, vf) {
2732                 lnk_speed = 1000;
2733                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2734                 if (status)
2735                         goto err;
2736                 vf_cfg->tx_rate = lnk_speed * 10;
2737
2738                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2739                                 vf + 1, vf_cfg->if_handle);
2740                 if (status)
2741                         goto err;
2742                 vf_cfg->def_vid = def_vlan;
2743
2744                 be_cmd_enable_vf(adapter, vf + 1);
2745         }
2746         return 0;
2747 err:
2748         return status;
2749 }
2750
2751 static void be_setup_init(struct be_adapter *adapter)
2752 {
2753         adapter->vlan_prio_bmap = 0xff;
2754         adapter->phy.link_speed = -1;
2755         adapter->if_handle = -1;
2756         adapter->be3_native = false;
2757         adapter->promiscuous = false;
2758         if (be_physfn(adapter))
2759                 adapter->cmd_privileges = MAX_PRIVILEGES;
2760         else
2761                 adapter->cmd_privileges = MIN_PRIVILEGES;
2762 }
2763
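     /* Select the MAC for the interface: reuse the netdev address when a
      * permanent MAC is already set, else query the FW (the method differs
      * for Lancer, BE3 PFs and BE3 VFs) */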
2764 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2765                            bool *active_mac, u32 *pmac_id)
2766 {
2767         int status = 0;
2768
2769         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2770                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2771                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2772                         *active_mac = true;
2773                 else
2774                         *active_mac = false;
2775
2776                 return status;
2777         }
2778
2779         if (lancer_chip(adapter)) {
2780                 status = be_cmd_get_mac_from_list(adapter, mac,
2781                                                   active_mac, pmac_id, 0);
2782                 if (*active_mac) {
2783                         status = be_cmd_mac_addr_query(adapter, mac, false,
2784                                                        if_handle, *pmac_id);
2785                 }
2786         } else if (be_physfn(adapter)) {
2787                 /* For BE3, for PF get permanent MAC */
2788                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2789                 *active_mac = false;
2790         } else {
2791                 /* For BE3, for VF get soft MAC assigned by PF */
2792                 status = be_cmd_mac_addr_query(adapter, mac, false,
2793                                                if_handle, 0);
2794                 *active_mac = true;
2795         }
2796         return status;
2797 }
2798
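     /* Fill in per-function resource limits: from the FW profile on Lancer,
      * else from fixed chip defaults */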
2799 static void be_get_resources(struct be_adapter *adapter)
2800 {
2801         int status;
2802         bool profile_present = false;
2803
2804         if (lancer_chip(adapter)) {
2805                 status = be_cmd_get_func_config(adapter);
2806
2807                 if (!status)
2808                         profile_present = true;
2809         }
2810
2811         if (profile_present) {
2812                 /* Sanity fixes for Lancer */
2813                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2814                                               BE_UC_PMAC_COUNT);
2815                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2816                                            BE_NUM_VLANS_SUPPORTED);
2817                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2818                                                BE_MAX_MC);
2819                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2820                                                MAX_TX_QS);
2821                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2822                                                 BE3_MAX_RSS_QS);
2823                 adapter->max_event_queues = min_t(u16,
2824                                                   adapter->max_event_queues,
2825                                                   BE3_MAX_RSS_QS);
2826
2827                 if (adapter->max_rss_queues &&
2828                     adapter->max_rss_queues == adapter->max_rx_queues)
2829                         adapter->max_rss_queues -= 1;
2830
2831                 if (adapter->max_event_queues < adapter->max_rss_queues)
2832                         adapter->max_rss_queues = adapter->max_event_queues;
2833
2834         } else {
2835                 if (be_physfn(adapter))
2836                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2837                 else
2838                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2839
2840                 if (adapter->function_mode & FLEX10_MODE)
2841                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2842                 else
2843                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2844
2845                 adapter->max_mcast_mac = BE_MAX_MC;
2846                 adapter->max_tx_queues = MAX_TX_QS;
2847                 adapter->max_rss_queues = (adapter->be3_native) ?
2848                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2849                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2850
2851                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2852                                         BE_IF_FLAGS_BROADCAST |
2853                                         BE_IF_FLAGS_MULTICAST |
2854                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2855                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2856                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2857                                         BE_IF_FLAGS_PROMISCUOUS;
2858
2859                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2860                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2861         }
2862 }
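/* Note on the Lancer profile path above: every firmware-reported limit is
 * clamped to the driver's compile-time maximum, the RSS ring count is
 * bounded by the event-queue count (each RX ring posts completions to an
 * EQ), and when every RX ring would otherwise be RSS-capable one ring is
 * held back to serve as the default (non-RSS) RX queue.
 */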
2863
2864 /* Routine to query per function resource limits */
2865 static int be_get_config(struct be_adapter *adapter)
2866 {
2867         int pos, status;
2868         u16 dev_num_vfs;
2869
2870         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2871                                      &adapter->function_mode,
2872                                      &adapter->function_caps);
2873         if (status)
2874                 goto err;
2875
2876         be_get_resources(adapter);
2877
2878         /* primary mac needs 1 pmac entry */
2879         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2880                                    sizeof(u32), GFP_KERNEL);
2881         if (!adapter->pmac_id) {
2882                 status = -ENOMEM;
2883                 goto err;
2884         }
2885
2886         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2887         if (pos) {
2888                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2889                                      &dev_num_vfs);
2890                 if (!lancer_chip(adapter))
2891                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2892                 adapter->dev_num_vfs = dev_num_vfs;
2893         }
2894 err:
2895         return status;
2896 }
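/* The SR-IOV probe in be_get_config() is the standard "locate the SR-IOV
 * extended capability, then read TotalVFs from config space" sequence. A
 * minimal standalone sketch (hypothetical helper, not used by the driver):
 */
static inline u16 be_example_total_vfs(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	u16 total_vfs = 0;

	if (pos)
		pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
				     &total_vfs);
	return total_vfs;
}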
2897
2898 static int be_setup(struct be_adapter *adapter)
2899 {
2900         struct device *dev = &adapter->pdev->dev;
2901         u32 en_flags;
2902         u32 tx_fc, rx_fc;
2903         int status;
2904         u8 mac[ETH_ALEN];
2905         bool active_mac;
2906
2907         be_setup_init(adapter);
2908
2909         if (!lancer_chip(adapter))
2910                 be_cmd_req_native_mode(adapter);
2911
2912         status = be_get_config(adapter);
2913         if (status)
2914                 goto err;
2915
2916         be_msix_enable(adapter);
2917
2918         status = be_evt_queues_create(adapter);
2919         if (status)
2920                 goto err;
2921
2922         status = be_tx_cqs_create(adapter);
2923         if (status)
2924                 goto err;
2925
2926         status = be_rx_cqs_create(adapter);
2927         if (status)
2928                 goto err;
2929
2930         status = be_mcc_queues_create(adapter);
2931         if (status)
2932                 goto err;
2933
2934         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2935         /* In UMC mode FW does not return right privileges.
2936          * Override with correct privilege equivalent to PF.
2937          */
2938         if (be_is_mc(adapter))
2939                 adapter->cmd_privileges = MAX_PRIVILEGES;
2940
2941         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2942                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2943
2944         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2945                 en_flags |= BE_IF_FLAGS_RSS;
2946
2947         en_flags = en_flags & adapter->if_cap_flags;
2948
2949         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2950                                   &adapter->if_handle, 0);
2951         if (status != 0)
2952                 goto err;
2953
2954         memset(mac, 0, ETH_ALEN);
2955         active_mac = false;
2956         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2957                                  &active_mac, &adapter->pmac_id[0]);
2958         if (status != 0)
2959                 goto err;
2960
2961         if (!active_mac) {
2962                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2963                                          &adapter->pmac_id[0], 0);
2964                 if (status != 0)
2965                         goto err;
2966         }
2967
2968         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2969                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2970                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2971         }
2972
2973         status = be_tx_qs_create(adapter);
2974         if (status)
2975                 goto err;
2976
2977         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2978
2979         if (adapter->vlans_added)
2980                 be_vid_config(adapter);
2981
2982         be_set_rx_mode(adapter->netdev);
2983
2984         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2985
2986         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2987                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2988                                         adapter->rx_fc);
2989
2990         if (be_physfn(adapter) && num_vfs) {
2991                 if (adapter->dev_num_vfs)
2992                         be_vf_setup(adapter);
2993                 else
2994                         dev_warn(dev, "device doesn't support SRIOV\n");
2995         }
2996
2997         status = be_cmd_get_phy_info(adapter);
2998         if (!status && be_pause_supported(adapter))
2999                 adapter->phy.fc_autoneg = 1;
3000
3001         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3002         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3003         return 0;
3004 err:
3005         be_clear(adapter);
3006         return status;
3007 }
3008
3009 #ifdef CONFIG_NET_POLL_CONTROLLER
3010 static void be_netpoll(struct net_device *netdev)
3011 {
3012         struct be_adapter *adapter = netdev_priv(netdev);
3013         struct be_eq_obj *eqo;
3014         int i;
3015
3016         for_all_evt_queues(adapter, eqo, i) {
3017                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3018                 napi_schedule(&eqo->napi);
3019         }
3022 }
3023 #endif
3024
3025 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3026 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
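/* The two 16-byte halves above (including the implicit NUL padding of the
 * shorter first half) are matched as one 32-byte signature: get_fsec_info()
 * below memcmp()s all sizeof(flash_cookie) bytes against fsec->cookie.
 */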
3027
3028 static bool be_flash_redboot(struct be_adapter *adapter,
3029                         const u8 *p, u32 img_start, int image_size,
3030                         int hdr_size)
3031 {
3032         u32 crc_offset;
3033         u8 flashed_crc[4];
3034         int status;
3035
3036         crc_offset = hdr_size + img_start + image_size - 4;
3037
3038         p += crc_offset;
3039
3040         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3041                         (image_size - 4));
3042         if (status) {
3043                 dev_err(&adapter->pdev->dev,
3044                         "could not get crc from flash, not flashing redboot\n");
3045                 return false;
3046         }
3047
3048         /* update redboot only if crc does not match */
3049         if (!memcmp(flashed_crc, p, 4))
3050                 return false;
3051         else
3052                 return true;
3053 }
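/* Boot code (redboot) is rewritten only when the CRC held in the last 4
 * bytes of the new image region differs from the CRC currently in flash;
 * skipping the write avoids an unnecessary (and comparatively risky)
 * boot-code flash cycle.
 */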
3054
3055 static bool phy_flashing_required(struct be_adapter *adapter)
3056 {
3057         return (adapter->phy.phy_type == TN_8022 &&
3058                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3059 }
3060
3061 static bool is_comp_in_ufi(struct be_adapter *adapter,
3062                            struct flash_section_info *fsec, int type)
3063 {
3064         int i = 0, img_type = 0;
3065         struct flash_section_info_g2 *fsec_g2 = NULL;
3066
3067         if (BE2_chip(adapter))
3068                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3069
3070         for (i = 0; i < MAX_FLASH_COMP; i++) {
3071                 if (fsec_g2)
3072                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3073                 else
3074                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3075
3076                 if (img_type == type)
3077                         return true;
3078         }
3079         return false;
3080 }
3082
3083 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3084                                          int header_size,
3085                                          const struct firmware *fw)
3086 {
3087         struct flash_section_info *fsec = NULL;
3088         const u8 *p = fw->data;
3089
3090         p += header_size;
3091         while (p < (fw->data + fw->size)) {
3092                 fsec = (struct flash_section_info *)p;
3093                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3094                         return fsec;
3095                 p += 32;
3096         }
3097         return NULL;
3098 }
3099
3100 static int be_flash(struct be_adapter *adapter, const u8 *img,
3101                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3102 {
3103         u32 total_bytes = 0, flash_op, num_bytes = 0;
3104         int status = 0;
3105         struct be_cmd_write_flashrom *req = flash_cmd->va;
3106
3107         total_bytes = img_size;
3108         while (total_bytes) {
3109                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3110
3111                 total_bytes -= num_bytes;
3112
3113                 if (!total_bytes) {
3114                         if (optype == OPTYPE_PHY_FW)
3115                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3116                         else
3117                                 flash_op = FLASHROM_OPER_FLASH;
3118                 } else {
3119                         if (optype == OPTYPE_PHY_FW)
3120                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3121                         else
3122                                 flash_op = FLASHROM_OPER_SAVE;
3123                 }
3124
3125                 memcpy(req->data_buf, img, num_bytes);
3126                 img += num_bytes;
3127                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3128                                                 flash_op, num_bytes);
3129                 if (status) {
3130                         if (status == ILLEGAL_IOCTL_REQ &&
3131                             optype == OPTYPE_PHY_FW)
3132                                 break;
3133                         dev_err(&adapter->pdev->dev,
3134                                 "cmd to write to flash rom failed.\n");
3135                         return status;
3136                 }
3137         }
3138         return 0;
3139 }
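/* Worked example of the chunking above: a 100KB image is sent as three
 * 32KB chunks with FLASHROM_OPER_SAVE (firmware buffers the data) and a
 * final 4KB chunk with FLASHROM_OPER_FLASH, which commits the accumulated
 * image to flash in one operation. An illustrative sketch (hypothetical
 * helper, not used by the driver) of the resulting command count:
 */
static inline u32 be_example_flash_ops(u32 img_size)
{
	/* one write_flashrom command per 32KB chunk, rounding up */
	return DIV_ROUND_UP(img_size, 32 * 1024);
}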
3140
3141 /* For BE2 and BE3 */
3142 static int be_flash_BEx(struct be_adapter *adapter,
3143                          const struct firmware *fw,
3144                          struct be_dma_mem *flash_cmd,
3145                          int num_of_images)
3146 {
3148         int status = 0, i, filehdr_size = 0;
3149         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3150         const u8 *p = fw->data;
3151         const struct flash_comp *pflashcomp;
3152         int num_comp, redboot;
3153         struct flash_section_info *fsec = NULL;
3154
3155         struct flash_comp gen3_flash_types[] = {
3156                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3157                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3158                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3159                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3160                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3161                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3162                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3163                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3164                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3165                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3166                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3167                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3168                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3169                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3170                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3171                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3172                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3173                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3174                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3175                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3176         };
3177
3178         struct flash_comp gen2_flash_types[] = {
3179                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3180                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3181                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3182                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3183                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3184                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3185                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3186                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3187                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3188                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3189                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3190                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3191                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3192                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3193                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3194                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3195         };
3196
3197         if (BE3_chip(adapter)) {
3198                 pflashcomp = gen3_flash_types;
3199                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3200                 num_comp = ARRAY_SIZE(gen3_flash_types);
3201         } else {
3202                 pflashcomp = gen2_flash_types;
3203                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3204                 num_comp = ARRAY_SIZE(gen2_flash_types);
3205         }
3206
3207         /* Get flash section info*/
3208         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3209         if (!fsec) {
3210                 dev_err(&adapter->pdev->dev,
3211                         "Invalid Cookie. UFI corrupted?\n");
3212                 return -1;
3213         }
3214         for (i = 0; i < num_comp; i++) {
3215                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3216                         continue;
3217
3218                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3219                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3220                         continue;
3221
3222                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3223                     !phy_flashing_required(adapter))
3224                         continue;
3225
3226                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3227                         redboot = be_flash_redboot(adapter, fw->data,
3228                                 pflashcomp[i].offset, pflashcomp[i].size,
3229                                 filehdr_size + img_hdrs_size);
3230                         if (!redboot)
3231                                 continue;
3232                 }
3233
3234                 p = fw->data;
3235                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3236                 if (p + pflashcomp[i].size > fw->data + fw->size)
3237                         return -1;
3238
3239                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3240                                         pflashcomp[i].size);
3241                 if (status) {
3242                         dev_err(&adapter->pdev->dev,
3243                                 "Flashing section type %d failed.\n",
3244                                 pflashcomp[i].img_type);
3245                         return status;
3246                 }
3247         }
3248         return 0;
3249 }
3250
3251 static int be_flash_skyhawk(struct be_adapter *adapter,
3252                 const struct firmware *fw,
3253                 struct be_dma_mem *flash_cmd, int num_of_images)
3254 {
3255         int status = 0, i, filehdr_size = 0;
3256         int img_offset, img_size, img_optype, redboot;
3257         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3258         const u8 *p = fw->data;
3259         struct flash_section_info *fsec = NULL;
3260
3261         filehdr_size = sizeof(struct flash_file_hdr_g3);
3262         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3263         if (!fsec) {
3264                 dev_err(&adapter->pdev->dev,
3265                         "Invalid Cookie. UFI corrupted?\n");
3266                 return -1;
3267         }
3268
3269         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3270                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3271                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3272
3273                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3274                 case IMAGE_FIRMWARE_iSCSI:
3275                         img_optype = OPTYPE_ISCSI_ACTIVE;
3276                         break;
3277                 case IMAGE_BOOT_CODE:
3278                         img_optype = OPTYPE_REDBOOT;
3279                         break;
3280                 case IMAGE_OPTION_ROM_ISCSI:
3281                         img_optype = OPTYPE_BIOS;
3282                         break;
3283                 case IMAGE_OPTION_ROM_PXE:
3284                         img_optype = OPTYPE_PXE_BIOS;
3285                         break;
3286                 case IMAGE_OPTION_ROM_FCoE:
3287                         img_optype = OPTYPE_FCOE_BIOS;
3288                         break;
3289                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3290                         img_optype = OPTYPE_ISCSI_BACKUP;
3291                         break;
3292                 case IMAGE_NCSI:
3293                         img_optype = OPTYPE_NCSI_FW;
3294                         break;
3295                 default:
3296                         continue;
3297                 }
3298
3299                 if (img_optype == OPTYPE_REDBOOT) {
3300                         redboot = be_flash_redboot(adapter, fw->data,
3301                                         img_offset, img_size,
3302                                         filehdr_size + img_hdrs_size);
3303                         if (!redboot)
3304                                 continue;
3305                 }
3306
3307                 p = fw->data;
3308                 p += filehdr_size + img_offset + img_hdrs_size;
3309                 if (p + img_size > fw->data + fw->size)
3310                         return -1;
3311
3312                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3313                 if (status) {
3314                         dev_err(&adapter->pdev->dev,
3315                                 "Flashing section type %d failed.\n",
3316                                 le32_to_cpu(fsec->fsec_entry[i].type));
3317                         return status;
3318                 }
3319         }
3320         return 0;
3321 }
3322
3323 static int lancer_wait_idle(struct be_adapter *adapter)
3324 {
3325 #define SLIPORT_IDLE_TIMEOUT 30
3326         u32 reg_val;
3327         int status = 0, i;
3328
3329         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3330                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3331                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3332                         break;
3333
3334                 ssleep(1);
3335         }
3336
3337         if (i == SLIPORT_IDLE_TIMEOUT)
3338                 status = -ETIMEDOUT;
3339
3340         return status;
3341 }
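/* lancer_wait_idle() polls the PHYSDEV control register once per second,
 * for at most SLIPORT_IDLE_TIMEOUT (30) seconds, until the in-progress
 * bit clears; lancer_fw_reset() below issues the firmware reset only
 * after the port reports idle.
 */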
3342
3343 static int lancer_fw_reset(struct be_adapter *adapter)
3344 {
3345         int status = 0;
3346
3347         status = lancer_wait_idle(adapter);
3348         if (status)
3349                 return status;
3350
3351         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3352                   PHYSDEV_CONTROL_OFFSET);
3353
3354         return status;
3355 }
3356
3357 static int lancer_fw_download(struct be_adapter *adapter,
3358                                 const struct firmware *fw)
3359 {
3360 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3361 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3362         struct be_dma_mem flash_cmd;
3363         const u8 *data_ptr = NULL;
3364         u8 *dest_image_ptr = NULL;
3365         size_t image_size = 0;
3366         u32 chunk_size = 0;
3367         u32 data_written = 0;
3368         u32 offset = 0;
3369         int status = 0;
3370         u8 add_status = 0;
3371         u8 change_status;
3372
3373         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3374                 dev_err(&adapter->pdev->dev,
3375                         "FW image not properly aligned. "
3376                         "Length must be 4-byte aligned.\n");
3377                 status = -EINVAL;
3378                 goto lancer_fw_exit;
3379         }
3380
3381         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3382                                 + LANCER_FW_DOWNLOAD_CHUNK;
3383         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3384                                                 &flash_cmd.dma, GFP_KERNEL);
3385         if (!flash_cmd.va) {
3386                 status = -ENOMEM;
3387                 dev_err(&adapter->pdev->dev,
3388                         "Memory allocation failure while flashing\n");
3389                 goto lancer_fw_exit;
3390         }
3391
3392         dest_image_ptr = flash_cmd.va +
3393                                 sizeof(struct lancer_cmd_req_write_object);
3394         image_size = fw->size;
3395         data_ptr = fw->data;
3396
3397         while (image_size) {
3398                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3399
3400                 /* Copy the image chunk content. */
3401                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3402
3403                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3404                                                  chunk_size, offset,
3405                                                  LANCER_FW_DOWNLOAD_LOCATION,
3406                                                  &data_written, &change_status,
3407                                                  &add_status);
3408                 if (status)
3409                         break;
3410
3411                 offset += data_written;
3412                 data_ptr += data_written;
3413                 image_size -= data_written;
3414         }
3415
3416         if (!status) {
3417                 /* Commit the FW written */
3418                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3419                                                  0, offset,
3420                                                  LANCER_FW_DOWNLOAD_LOCATION,
3421                                                  &data_written, &change_status,
3422                                                  &add_status);
3423         }
3424
3425         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3426                                 flash_cmd.dma);
3427         if (status) {
3428                 dev_err(&adapter->pdev->dev,
3429                         "Firmware load error. "
3430                         "Status code: 0x%x Additional Status: 0x%x\n",
3431                         status, add_status);
3432                 goto lancer_fw_exit;
3433         }
3434
3435         if (change_status == LANCER_FW_RESET_NEEDED) {
3436                 status = lancer_fw_reset(adapter);
3437                 if (status) {
3438                         dev_err(&adapter->pdev->dev,
3439                                 "Adapter busy for FW reset.\n"
3440                                 "New FW will not be active.\n");
3441                         goto lancer_fw_exit;
3442                 }
3443         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3444                 dev_err(&adapter->pdev->dev,
3445                         "System reboot required for new FW"
3446                         " to be active\n");
3447         }
3448
3449         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3450 lancer_fw_exit:
3451         return status;
3452 }
3453
3454 #define UFI_TYPE2               2
3455 #define UFI_TYPE3               3
3456 #define UFI_TYPE4               4
3457 static int be_get_ufi_type(struct be_adapter *adapter,
3458                            struct flash_file_hdr_g2 *fhdr)
3459 {
3460         if (fhdr == NULL)
3461                 goto be_get_ufi_exit;
3462
3463         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3464                 return UFI_TYPE4;
3465         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3466                 return UFI_TYPE3;
3467         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3468                 return UFI_TYPE2;
3469
3470 be_get_ufi_exit:
3471         dev_err(&adapter->pdev->dev,
3472                 "UFI and Interface are not compatible for flashing\n");
3473         return -1;
3474 }
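/* The leading digit of the UFI build string identifies the chip generation
 * the image targets ('2' => BE2, '3' => BE3, '4' => Skyhawk); a mismatch
 * with the adapter's generation aborts the flash with an error.
 */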
3475
3476 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3477 {
3478         struct flash_file_hdr_g2 *fhdr;
3479         struct flash_file_hdr_g3 *fhdr3;
3480         struct image_hdr *img_hdr_ptr = NULL;
3481         struct be_dma_mem flash_cmd;
3482         const u8 *p;
3483         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3484
3485         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3486         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3487                                           &flash_cmd.dma, GFP_KERNEL);
3488         if (!flash_cmd.va) {
3489                 status = -ENOMEM;
3490                 dev_err(&adapter->pdev->dev,
3491                         "Memory allocation failure while flashing\n");
3492                 goto be_fw_exit;
3493         }
3494
3495         p = fw->data;
3496         fhdr = (struct flash_file_hdr_g2 *)p;
3497
3498         ufi_type = be_get_ufi_type(adapter, fhdr);
3499
3500         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3501         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3502         for (i = 0; i < num_imgs; i++) {
3503                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3504                                 (sizeof(struct flash_file_hdr_g3) +
3505                                  i * sizeof(struct image_hdr)));
3506                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3507                         if (ufi_type == UFI_TYPE4)
3508                                 status = be_flash_skyhawk(adapter, fw,
3509                                                         &flash_cmd, num_imgs);
3510                         else if (ufi_type == UFI_TYPE3)
3511                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3512                                                       num_imgs);
3513                 }
3514         }
3515
3516         if (ufi_type == UFI_TYPE2)
3517                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3518         else if (ufi_type == -1)
3519                 status = -1;
3520
3521         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3522                           flash_cmd.dma);
3523         if (status) {
3524                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3525                 goto be_fw_exit;
3526         }
3527
3528         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3529
3530 be_fw_exit:
3531         return status;
3532 }
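/* Gen-2 (UFI_TYPE2) images carry no per-image headers, so be_flash_BEx()
 * is invoked with num_of_images == 0 and no image-header area is skipped
 * when computing flash-directory offsets.
 */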
3533
3534 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3535 {
3536         const struct firmware *fw;
3537         int status;
3538
3539         if (!netif_running(adapter->netdev)) {
3540                 dev_err(&adapter->pdev->dev,
3541                         "Firmware load not allowed (interface is down)\n");
3542                 return -ENETDOWN;
3543         }
3544
3545         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3546         if (status)
3547                 goto fw_exit;
3548
3549         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3550
3551         if (lancer_chip(adapter))
3552                 status = lancer_fw_download(adapter, fw);
3553         else
3554                 status = be_fw_download(adapter, fw);
3555
3556 fw_exit:
3557         release_firmware(fw);
3558         return status;
3559 }
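/* be_load_fw() is normally reached via ethtool's ETHTOOL_FLASHDEV path,
 * e.g. "ethtool -f eth0 <image.ufi>"; request_firmware() then resolves
 * the named file through the regular firmware-loader search path
 * (typically /lib/firmware).
 */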
3560
3561 static const struct net_device_ops be_netdev_ops = {
3562         .ndo_open               = be_open,
3563         .ndo_stop               = be_close,
3564         .ndo_start_xmit         = be_xmit,
3565         .ndo_set_rx_mode        = be_set_rx_mode,
3566         .ndo_set_mac_address    = be_mac_addr_set,
3567         .ndo_change_mtu         = be_change_mtu,
3568         .ndo_get_stats64        = be_get_stats64,
3569         .ndo_validate_addr      = eth_validate_addr,
3570         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3571         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3572         .ndo_set_vf_mac         = be_set_vf_mac,
3573         .ndo_set_vf_vlan        = be_set_vf_vlan,
3574         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3575         .ndo_get_vf_config      = be_get_vf_config,
3576 #ifdef CONFIG_NET_POLL_CONTROLLER
3577         .ndo_poll_controller    = be_netpoll,
3578 #endif
3579 };
3580
3581 static void be_netdev_init(struct net_device *netdev)
3582 {
3583         struct be_adapter *adapter = netdev_priv(netdev);
3584         struct be_eq_obj *eqo;
3585         int i;
3586
3587         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3588                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3589                 NETIF_F_HW_VLAN_TX;
3590         if (be_multi_rxq(adapter))
3591                 netdev->hw_features |= NETIF_F_RXHASH;
3592
3593         netdev->features |= netdev->hw_features |
3594                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3595
3596         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3597                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3598
3599         netdev->priv_flags |= IFF_UNICAST_FLT;
3600
3601         netdev->flags |= IFF_MULTICAST;
3602
3603         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3604
3605         netdev->netdev_ops = &be_netdev_ops;
3606
3607         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3608
3609         for_all_evt_queues(adapter, eqo, i)
3610                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3611 }
3612
3613 static void be_unmap_pci_bars(struct be_adapter *adapter)
3614 {
3615         if (adapter->db)
3616                 pci_iounmap(adapter->pdev, adapter->db);
3617 }
3618
3619 static int db_bar(struct be_adapter *adapter)
3620 {
3621         if (lancer_chip(adapter) || !be_physfn(adapter))
3622                 return 0;
3623         else
3624                 return 4;
3625 }
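/* Doorbell BAR selection: Lancer chips and virtual functions expose the
 * doorbell region in BAR 0, while BE2/BE3 physical functions use BAR 4.
 */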
3626
3627 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3628 {
3629         if (skyhawk_chip(adapter)) {
3630                 adapter->roce_db.size = 4096;
3631                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3632                                                               db_bar(adapter));
3633                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3634                                                                db_bar(adapter));
3635         }
3636         return 0;
3637 }
3638
3639 static int be_map_pci_bars(struct be_adapter *adapter)
3640 {
3641         u8 __iomem *addr;
3642         u32 sli_intf;
3643
3644         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3645         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3646                                 SLI_INTF_IF_TYPE_SHIFT;
3647
3648         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3649         if (addr == NULL)
3650                 goto pci_map_err;
3651         adapter->db = addr;
3652
3653         be_roce_map_pci_bars(adapter);
3654         return 0;
3655
3656 pci_map_err:
3657         be_unmap_pci_bars(adapter);
3658         return -ENOMEM;
3659 }
3660
3661 static void be_ctrl_cleanup(struct be_adapter *adapter)
3662 {
3663         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3664
3665         be_unmap_pci_bars(adapter);
3666
3667         if (mem->va)
3668                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3669                                   mem->dma);
3670
3671         mem = &adapter->rx_filter;
3672         if (mem->va)
3673                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3674                                   mem->dma);
3675 }
3676
3677 static int be_ctrl_init(struct be_adapter *adapter)
3678 {
3679         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3680         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3681         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3682         u32 sli_intf;
3683         int status;
3684
3685         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3686         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3687                                  SLI_INTF_FAMILY_SHIFT;
3688         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3689
3690         status = be_map_pci_bars(adapter);
3691         if (status)
3692                 goto done;
3693
3694         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3695         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3696                                                 mbox_mem_alloc->size,
3697                                                 &mbox_mem_alloc->dma,
3698                                                 GFP_KERNEL);
3699         if (!mbox_mem_alloc->va) {
3700                 status = -ENOMEM;
3701                 goto unmap_pci_bars;
3702         }
3703         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3704         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3705         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3706         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3707
3708         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3709         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3710                                         &rx_filter->dma, GFP_KERNEL);
3711         if (rx_filter->va == NULL) {
3712                 status = -ENOMEM;
3713                 goto free_mbox;
3714         }
3715         memset(rx_filter->va, 0, rx_filter->size);
3716         mutex_init(&adapter->mbox_lock);
3717         spin_lock_init(&adapter->mcc_lock);
3718         spin_lock_init(&adapter->mcc_cq_lock);
3719
3720         init_completion(&adapter->flash_compl);
3721         pci_save_state(adapter->pdev);
3722         return 0;
3723
3724 free_mbox:
3725         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3726                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3727
3728 unmap_pci_bars:
3729         be_unmap_pci_bars(adapter);
3730
3731 done:
3732         return status;
3733 }
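/* Mailbox alignment note: be_ctrl_init() over-allocates the mailbox by 16
 * bytes and uses PTR_ALIGN() to carve out a 16-byte-aligned view for the
 * hardware, while the original pointer/size pair is kept in
 * mbox_mem_alloced so the buffer can be freed later.
 */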
3734
3735 static void be_stats_cleanup(struct be_adapter *adapter)
3736 {
3737         struct be_dma_mem *cmd = &adapter->stats_cmd;
3738
3739         if (cmd->va)
3740                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3741                                   cmd->va, cmd->dma);
3742 }
3743
3744 static int be_stats_init(struct be_adapter *adapter)
3745 {
3746         struct be_dma_mem *cmd = &adapter->stats_cmd;
3747
3748         if (lancer_chip(adapter))
3749                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3750         else if (BE2_chip(adapter))
3751                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3752         else
3753                 /* BE3 and Skyhawk */
3754                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3755
3756         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3757                                      GFP_KERNEL);
3758         if (cmd->va == NULL)
3759                 return -ENOMEM;
3760         memset(cmd->va, 0, cmd->size);
3761         return 0;
3762 }
3763
3764 static void __devexit be_remove(struct pci_dev *pdev)
3765 {
3766         struct be_adapter *adapter = pci_get_drvdata(pdev);
3767
3768         if (!adapter)
3769                 return;
3770
3771         be_roce_dev_remove(adapter);
3772
3773         cancel_delayed_work_sync(&adapter->func_recovery_work);
3774
3775         unregister_netdev(adapter->netdev);
3776
3777         be_clear(adapter);
3778
3779         /* tell fw we're done with firing cmds */
3780         be_cmd_fw_clean(adapter);
3781
3782         be_stats_cleanup(adapter);
3783
3784         be_ctrl_cleanup(adapter);
3785
3786         pci_disable_pcie_error_reporting(pdev);
3787
3788         pci_set_drvdata(pdev, NULL);
3789         pci_release_regions(pdev);
3790         pci_disable_device(pdev);
3791
3792         free_netdev(adapter->netdev);
3793 }
3794
3795 bool be_is_wol_supported(struct be_adapter *adapter)
3796 {
3797         return (adapter->wol_cap & BE_WOL_CAP) &&
3798                 !be_is_wol_excluded(adapter);
3799 }
3800
3801 u32 be_get_fw_log_level(struct be_adapter *adapter)
3802 {
3803         struct be_dma_mem extfat_cmd;
3804         struct be_fat_conf_params *cfgs;
3805         int status;
3806         u32 level = 0;
3807         int j;
3808
3809         if (lancer_chip(adapter))
3810                 return 0;
3811
3812         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3813         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3814         extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, extfat_cmd.size,
3815                                            &extfat_cmd.dma, GFP_KERNEL);
3816
3817         if (!extfat_cmd.va) {
3818                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3819                         __func__);
3820                 goto err;
3821         }
3822
3823         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3824         if (!status) {
3825                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3826                                                 sizeof(struct be_cmd_resp_hdr));
3827                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3828                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3829                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3830                 }
3831         }
3832         dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
3833                           extfat_cmd.dma);
3834 err:
3835         return level;
3836 }
3837
3838 static int be_get_initial_config(struct be_adapter *adapter)
3839 {
3840         int status;
3841         u32 level;
3842
3843         status = be_cmd_get_cntl_attributes(adapter);
3844         if (status)
3845                 return status;
3846
3847         status = be_cmd_get_acpi_wol_cap(adapter);
3848         if (status) {
3849                 /* in case of a failure to get wol capabilities
3850                  * check the exclusion list to determine WOL capability */
3851                 if (!be_is_wol_excluded(adapter))
3852                         adapter->wol_cap |= BE_WOL_CAP;
3853         }
3854
3855         if (be_is_wol_supported(adapter))
3856                 adapter->wol = true;
3857
3858         /* Must be a power of 2 or else MODULO will BUG_ON */
3859         adapter->be_get_temp_freq = 64;
3860
3861         level = be_get_fw_log_level(adapter);
3862         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3863
3864         return 0;
3865 }
3866
3867 static int lancer_recover_func(struct be_adapter *adapter)
3868 {
3869         int status;
3870
3871         status = lancer_test_and_set_rdy_state(adapter);
3872         if (status)
3873                 goto err;
3874
3875         if (netif_running(adapter->netdev))
3876                 be_close(adapter->netdev);
3877
3878         be_clear(adapter);
3879
3880         adapter->hw_error = false;
3881         adapter->fw_timeout = false;
3882
3883         status = be_setup(adapter);
3884         if (status)
3885                 goto err;
3886
3887         if (netif_running(adapter->netdev)) {
3888                 status = be_open(adapter->netdev);
3889                 if (status)
3890                         goto err;
3891         }
3892
3893         dev_info(&adapter->pdev->dev,
3894                  "Adapter SLIPORT recovery succeeded\n");
3895         return 0;
3896 err:
3897         if (adapter->eeh_error)
3898                 dev_err(&adapter->pdev->dev,
3899                         "Adapter SLIPORT recovery failed\n");
3900
3901         return status;
3902 }
3903
3904 static void be_func_recovery_task(struct work_struct *work)
3905 {
3906         struct be_adapter *adapter =
3907                 container_of(work, struct be_adapter, func_recovery_work.work);
3908         int status;
3909
3910         be_detect_error(adapter);
3911
3912         if (adapter->hw_error && lancer_chip(adapter)) {
3913
3914                 if (adapter->eeh_error)
3915                         goto out;
3916
3917                 rtnl_lock();
3918                 netif_device_detach(adapter->netdev);
3919                 rtnl_unlock();
3920
3921                 status = lancer_recover_func(adapter);
3922
3923                 if (!status)
3924                         netif_device_attach(adapter->netdev);
3925         }
3926
3927 out:
3928         schedule_delayed_work(&adapter->func_recovery_work,
3929                               msecs_to_jiffies(1000));
3930 }
3931
3932 static void be_worker(struct work_struct *work)
3933 {
3934         struct be_adapter *adapter =
3935                 container_of(work, struct be_adapter, work.work);
3936         struct be_rx_obj *rxo;
3937         struct be_eq_obj *eqo;
3938         int i;
3939
3940         /* when interrupts are not yet enabled, just reap any pending
3941          * mcc completions */
3942         if (!netif_running(adapter->netdev)) {
3943                 local_bh_disable();
3944                 be_process_mcc(adapter);
3945                 local_bh_enable();
3946                 goto reschedule;
3947         }
3948
3949         if (!adapter->stats_cmd_sent) {
3950                 if (lancer_chip(adapter))
3951                         lancer_cmd_get_pport_stats(adapter,
3952                                                 &adapter->stats_cmd);
3953                 else
3954                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3955         }
3956
3957         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3958                 be_cmd_get_die_temperature(adapter);
3959
3960         for_all_rx_queues(adapter, rxo, i) {
3961                 if (rxo->rx_post_starved) {
3962                         rxo->rx_post_starved = false;
3963                         be_post_rx_frags(rxo, GFP_KERNEL);
3964                 }
3965         }
3966
3967         for_all_evt_queues(adapter, eqo, i)
3968                 be_eqd_update(adapter, eqo);
3969
3970 reschedule:
3971         adapter->work_counter++;
3972         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3973 }
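/* The 1-second worker above refreshes HW stats, samples the die
 * temperature every be_get_temp_freq iterations, replenishes any RX ring
 * that previously failed to post buffers (rx_post_starved), and adapts
 * each EQ's interrupt delay to the observed event rate.
 */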
3974
3975 static bool be_reset_required(struct be_adapter *adapter)
3976 {
3977         return be_find_vfs(adapter, ENABLED) <= 0;
3978 }
3979
3980 static char *mc_name(struct be_adapter *adapter)
3981 {
3982         if (adapter->function_mode & FLEX10_MODE)
3983                 return "FLEX10";
3984         else if (adapter->function_mode & VNIC_MODE)
3985                 return "vNIC";
3986         else if (adapter->function_mode & UMC_ENABLED)
3987                 return "UMC";
3988         else
3989                 return "";
3990 }
3991
3992 static inline char *func_name(struct be_adapter *adapter)
3993 {
3994         return be_physfn(adapter) ? "PF" : "VF";
3995 }
3996
3997 static int __devinit be_probe(struct pci_dev *pdev,
3998                         const struct pci_device_id *pdev_id)
3999 {
4000         int status = 0;
4001         struct be_adapter *adapter;
4002         struct net_device *netdev;
4003         char port_name;
4004
4005         status = pci_enable_device(pdev);
4006         if (status)
4007                 goto do_none;
4008
4009         status = pci_request_regions(pdev, DRV_NAME);
4010         if (status)
4011                 goto disable_dev;
4012         pci_set_master(pdev);
4013
4014         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4015         if (netdev == NULL) {
4016                 status = -ENOMEM;
4017                 goto rel_reg;
4018         }
4019         adapter = netdev_priv(netdev);
4020         adapter->pdev = pdev;
4021         pci_set_drvdata(pdev, adapter);
4022         adapter->netdev = netdev;
4023         SET_NETDEV_DEV(netdev, &pdev->dev);
4024
4025         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4026         if (!status) {
4027                 netdev->features |= NETIF_F_HIGHDMA;
4028         } else {
4029                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4030                 if (status) {
4031                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4032                         goto free_netdev;
4033                 }
4034         }
4035
4036         status = pci_enable_pcie_error_reporting(pdev);
4037         if (status)
4038                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4039
4040         status = be_ctrl_init(adapter);
4041         if (status)
4042                 goto free_netdev;
4043
4044         /* sync up with fw's ready state */
4045         if (be_physfn(adapter)) {
4046                 status = be_fw_wait_ready(adapter);
4047                 if (status)
4048                         goto ctrl_clean;
4049         }
4050
4051         /* tell fw we're ready to fire cmds */
4052         status = be_cmd_fw_init(adapter);
4053         if (status)
4054                 goto ctrl_clean;
4055
4056         if (be_reset_required(adapter)) {
4057                 status = be_cmd_reset_function(adapter);
4058                 if (status)
4059                         goto ctrl_clean;
4060         }
4061
4062         /* The INTR bit may be set in the card when probed by a kdump kernel
4063          * after a crash.
4064          */
4065         if (!lancer_chip(adapter))
4066                 be_intr_set(adapter, false);
4067
4068         status = be_stats_init(adapter);
4069         if (status)
4070                 goto ctrl_clean;
4071
4072         status = be_get_initial_config(adapter);
4073         if (status)
4074                 goto stats_clean;
4075
4076         INIT_DELAYED_WORK(&adapter->work, be_worker);
4077         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4078         adapter->rx_fc = adapter->tx_fc = true;
4079
4080         status = be_setup(adapter);
4081         if (status)
4082                 goto stats_clean;
4083
4084         be_netdev_init(netdev);
4085         status = register_netdev(netdev);
4086         if (status != 0)
4087                 goto unsetup;
4088
4089         be_roce_dev_add(adapter);
4090
4091         schedule_delayed_work(&adapter->func_recovery_work,
4092                               msecs_to_jiffies(1000));
4093
4094         be_cmd_query_port_name(adapter, &port_name);
4095
4096         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4097                  func_name(adapter), mc_name(adapter), port_name);
4098
4099         return 0;
4100
4101 unsetup:
4102         be_clear(adapter);
4103 stats_clean:
4104         be_stats_cleanup(adapter);
4105 ctrl_clean:
4106         be_ctrl_cleanup(adapter);
4107 free_netdev:
4108         free_netdev(netdev);
4109         pci_set_drvdata(pdev, NULL);
4110 rel_reg:
4111         pci_release_regions(pdev);
4112 disable_dev:
4113         pci_disable_device(pdev);
4114 do_none:
4115         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4116         return status;
4117 }
4118
4119 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4120 {
4121         struct be_adapter *adapter = pci_get_drvdata(pdev);
4122         struct net_device *netdev = adapter->netdev;
4123
4124         if (adapter->wol)
4125                 be_setup_wol(adapter, true);
4126
4127         cancel_delayed_work_sync(&adapter->func_recovery_work);
4128
4129         netif_device_detach(netdev);
4130         if (netif_running(netdev)) {
4131                 rtnl_lock();
4132                 be_close(netdev);
4133                 rtnl_unlock();
4134         }
4135         be_clear(adapter);
4136
4137         pci_save_state(pdev);
4138         pci_disable_device(pdev);
4139         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4140         return 0;
4141 }
4142
4143 static int be_resume(struct pci_dev *pdev)
4144 {
4145         int status = 0;
4146         struct be_adapter *adapter = pci_get_drvdata(pdev);
4147         struct net_device *netdev = adapter->netdev;
4148
4149         netif_device_detach(netdev);
4150
4151         status = pci_enable_device(pdev);
4152         if (status)
4153                 return status;
4154
4155         pci_set_power_state(pdev, PCI_D0);
4156         pci_restore_state(pdev);
4157
4158         /* tell fw we're ready to fire cmds */
4159         status = be_cmd_fw_init(adapter);
4160         if (status)
4161                 return status;
4162
4163         be_setup(adapter);
4164         if (netif_running(netdev)) {
4165                 rtnl_lock();
4166                 be_open(netdev);
4167                 rtnl_unlock();
4168         }
4169
4170         schedule_delayed_work(&adapter->func_recovery_work,
4171                               msecs_to_jiffies(1000));
4172         netif_device_attach(netdev);
4173
4174         if (adapter->wol)
4175                 be_setup_wol(adapter, false);
4176
4177         return 0;
4178 }
4179
4180 /*
4181  * An FLR will stop BE from DMAing any data.
4182  */
4183 static void be_shutdown(struct pci_dev *pdev)
4184 {
4185         struct be_adapter *adapter = pci_get_drvdata(pdev);
4186
4187         if (!adapter)
4188                 return;
4189
4190         cancel_delayed_work_sync(&adapter->work);
4191         cancel_delayed_work_sync(&adapter->func_recovery_work);
4192
4193         netif_device_detach(adapter->netdev);
4194
4195         be_cmd_reset_function(adapter);
4196
4197         pci_disable_device(pdev);
4198 }
4199
4200 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4201                                 pci_channel_state_t state)
4202 {
4203         struct be_adapter *adapter = pci_get_drvdata(pdev);
4204         struct net_device *netdev = adapter->netdev;
4205
4206         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4207
4208         adapter->eeh_error = true;
4209
4210         cancel_delayed_work_sync(&adapter->func_recovery_work);
4211
4212         rtnl_lock();
4213         netif_device_detach(netdev);
4214         rtnl_unlock();
4215
4216         if (netif_running(netdev)) {
4217                 rtnl_lock();
4218                 be_close(netdev);
4219                 rtnl_unlock();
4220         }
4221         be_clear(adapter);
4222
4223         if (state == pci_channel_io_perm_failure)
4224                 return PCI_ERS_RESULT_DISCONNECT;
4225
4226         pci_disable_device(pdev);
4227
4228         /* The error could cause the FW to trigger a flash debug dump.
4229          * Resetting the card while flash dump is in progress
4230          * can cause it not to recover; wait for it to finish.
4231          * Wait only for first function as it is needed only once per
4232          * adapter.
4233          */
4234         if (pdev->devfn == 0)
4235                 ssleep(30);
4236
4237         return PCI_ERS_RESULT_NEED_RESET;
4238 }
4239
4240 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4241 {
4242         struct be_adapter *adapter = pci_get_drvdata(pdev);
4243         int status;
4244
4245         dev_info(&adapter->pdev->dev, "EEH reset\n");
4246         be_clear_all_error(adapter);
4247
4248         status = pci_enable_device(pdev);
4249         if (status)
4250                 return PCI_ERS_RESULT_DISCONNECT;
4251
4252         pci_set_master(pdev);
4253         pci_set_power_state(pdev, PCI_D0);
4254         pci_restore_state(pdev);
4255
4256         /* Check if card is ok and fw is ready */
4257         status = be_fw_wait_ready(adapter);
4258         if (status)
4259                 return PCI_ERS_RESULT_DISCONNECT;
4260
4261         pci_cleanup_aer_uncorrect_error_status(pdev);
4262         return PCI_ERS_RESULT_RECOVERED;
4263 }
4264
4265 static void be_eeh_resume(struct pci_dev *pdev)
4266 {
4267         int status = 0;
4268         struct be_adapter *adapter = pci_get_drvdata(pdev);
4269         struct net_device *netdev = adapter->netdev;
4270
4271         dev_info(&adapter->pdev->dev, "EEH resume\n");
4272
4273         pci_save_state(pdev);
4274
4275         /* tell fw we're ready to fire cmds */
4276         status = be_cmd_fw_init(adapter);
4277         if (status)
4278                 goto err;
4279
4280         status = be_cmd_reset_function(adapter);
4281         if (status)
4282                 goto err;
4283
4284         status = be_setup(adapter);
4285         if (status)
4286                 goto err;
4287
4288         if (netif_running(netdev)) {
4289                 status = be_open(netdev);
4290                 if (status)
4291                         goto err;
4292         }
4293
4294         schedule_delayed_work(&adapter->func_recovery_work,
4295                               msecs_to_jiffies(1000));
4296         netif_device_attach(netdev);
4297         return;
4298 err:
4299         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4300 }
4301
4302 static const struct pci_error_handlers be_eeh_handlers = {
4303         .error_detected = be_eeh_err_detected,
4304         .slot_reset = be_eeh_reset,
4305         .resume = be_eeh_resume,
4306 };
4307
4308 static struct pci_driver be_driver = {
4309         .name = DRV_NAME,
4310         .id_table = be_dev_ids,
4311         .probe = be_probe,
4312         .remove = be_remove,
4313         .suspend = be_suspend,
4314         .resume = be_resume,
4315         .shutdown = be_shutdown,
4316         .err_handler = &be_eeh_handlers
4317 };
4318
4319 static int __init be_init_module(void)
4320 {
4321         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4322             rx_frag_size != 2048) {
4323                 printk(KERN_WARNING DRV_NAME
4324                         " : Module param rx_frag_size must be 2048/4096/8192."
4325                         " Using 2048\n");
4326                 rx_frag_size = 2048;
4327         }
4328
4329         return pci_register_driver(&be_driver);
4330 }
4331 module_init(be_init_module);
4332
4333 static void __exit be_exit_module(void)
4334 {
4335         pci_unregister_driver(&be_driver);
4336 }
4337 module_exit(be_exit_module);