/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#include <../drivers/net/bonding/bonding.h>

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr
                                         */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
         * neither will have Free Lists associated with them.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};
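
/* A quick check of the VF arithmetic above (illustrative, not used by the
 * code): each of the VFRES_NQSETS "Queue Sets" contributes one ingress queue
 * with a Free List, and the two extra interrupt-capable ingress queues
 * described in the comment account for the "+2" in VFRES_NIQFLINT.  Likewise,
 * each Queue Set needs an Egress Context for its Ethernet/Control Egress
 * Queue and another for its Free List, hence VFRES_NEQ = VFRES_NQSETS * 2.
 */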

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));

                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
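
/* A worked example of the bit trick above (a sketch, not driver code): with
 * portvec = 0xa (ports 1 and 3 active) and portn = 1, the first pass computes
 * portvec & (portvec - 1) = 0x8, so pmask = 0xa ^ 0x8 = 0x2, the lowest set
 * bit (port 1).  portn isn't yet 0, so that bit is stripped from portvec and
 * the second pass isolates 0x8 (port 3), which is returned.
 */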

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
        /* Administrative fields for filter.
         */
        u32 valid:1;            /* filter allocated and valid */
        u32 locked:1;           /* filter is administratively locked */

        u32 pending:1;          /* filter action is pending firmware reply */
        u32 smtidx:8;           /* Source MAC Table index for smac */
        struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

        /* The filter itself.  Most of this is a straight copy of information
         * provided by the extended ioctl().  Some fields are translated to
         * internal forms -- for instance the Ingress Queue ID passed in from
         * the ioctl() is translated into the Absolute Ingress Queue ID.
         */
        struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
        CH_DEVICE(0x5004, 4),
        CH_DEVICE(0x5005, 4),
        CH_DEVICE(0x5006, 4),
        CH_DEVICE(0x5007, 4),
        CH_DEVICE(0x5008, 4),
        CH_DEVICE(0x5009, 4),
        CH_DEVICE(0x500A, 4),
        CH_DEVICE(0x500B, 4),
        CH_DEVICE(0x500C, 4),
        CH_DEVICE(0x500D, 4),
        CH_DEVICE(0x500E, 4),
        CH_DEVICE(0x500F, 4),
        CH_DEVICE(0x5010, 4),
        CH_DEVICE(0x5011, 4),
        CH_DEVICE(0x5012, 4),
        CH_DEVICE(0x5013, 4),
        CH_DEVICE(0x5401, 4),
        CH_DEVICE(0x5402, 4),
        CH_DEVICE(0x5403, 4),
        CH_DEVICE(0x5404, 4),
        CH_DEVICE(0x5405, 4),
        CH_DEVICE(0x5406, 4),
        CH_DEVICE(0x5407, 4),
        CH_DEVICE(0x5408, 4),
        CH_DEVICE(0x5409, 4),
        CH_DEVICE(0x540A, 4),
        CH_DEVICE(0x540B, 4),
        CH_DEVICE(0x540C, 4),
        CH_DEVICE(0x540D, 4),
        CH_DEVICE(0x540E, 4),
        CH_DEVICE(0x540F, 4),
        CH_DEVICE(0x5410, 4),
        CH_DEVICE(0x5411, 4),
        CH_DEVICE(0x5412, 4),
        CH_DEVICE(0x5413, 4),
        { 0, }
};

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
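
/* For example (illustrative), to keep the driver off MSI-X on a platform with
 * broken MSI-X routing while still allowing MSI:
 *
 *      modprobe cxgb4 msi=1
 *
 * and "msi=0" would force legacy INTx.
 */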

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
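
/* Why the 2-byte pad helps (a sketch): a 14-byte Ethernet header placed at a
 * 4-byte-aligned address leaves the IP header at offset 14, so its 4-byte
 * address fields straddle word boundaries.  Starting the frame 2 bytes in
 * moves the IP header to offset 16, restoring 4-byte alignment -- the same
 * reasoning behind the kernel's NET_IP_ALIGN, which is 2 on most
 * architectures:
 *
 *      offset:  0    2                16
 *               +----+----------------+----------------
 *               | pad| Ethernet header| IP header ...
 */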

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 *   { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *     [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
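
/* Tallying the default selection above against the 36-bit budget
 * (illustrative): IP Fragment (1) + MPS Match Type (3) + IP Protocol (8) +
 * [Inner] VLAN (17) + Port (3) + FCoE (1) = 33 bits, leaving 3 bits spare.
 */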

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
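
/* Note on the batching above: addr[] holds at most 7 entries, so exact-match
 * filters are requested in chunks of up to 7 addresses per firmware call.
 * "free" is true only for the first call, so the VI's previous filters are
 * released exactly once, and any addresses the firmware can't place exactly
 * fall through to the uhash/mhash bits programmed by t4_set_addr_hash().
 */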

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

static struct workqueue_struct *workq;

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing Layer Two Table (L2T) entries of the old
         * filter rule.  The firmware will handle freeing up any Source MAC
         * Table (SMT) entries used for rewriting Source MAC Addresses in
         * loopback rules.
         */
        if (f->l2t)
                cxgb4_l2t_release(f->l2t);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags, l2t pointer, etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        unsigned int idx = GET_TID(rpl);
        unsigned int nidx = idx - adap->tids.ftid_base;
        unsigned int ret;
        struct filter_entry *f;

        if (idx >= adap->tids.ftid_base && nidx <
           (adap->tids.nftids + adap->tids.nsftids)) {
                idx = nidx;
                ret = GET_TCB_COOKIE(rpl->cookie);
                f = &adap->tids.ftid_tab[idx];

                if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /* Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
                        dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
                                idx);
                        clear_filter(adap, f);
                } else if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                } else {
                        /* Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
                                idx, ret);
                        clear_filter(adap, f);
                }
        }
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */

        /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
         */
        if (unlikely(opcode == CPL_FW4_MSG &&
           ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
                rsp++;
                opcode = ((const struct rss_header *)rsp)->opcode;
                rsp++;
                if (opcode != CPL_SGE_EGR_UPDATE) {
                        dev_err(q->adap->pdev_dev,
                                "unexpected FW4/CPL %#x on FW event queue\n",
                                opcode);
                        goto out;
                }
        }

        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else if (opcode == CPL_SET_TCB_RPL) {
                const struct cpl_set_tcb_rpl *p = (void *)rsp;

                filter_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
out:
        return 0;
}
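
/* Layout walked by the encapsulation check above (a sketch; each rsp++
 * advances one 64-bit flit):
 *
 *      rsp[0]  struct rss_header   (opcode == CPL_FW4_MSG)
 *      rsp[1]  struct cpl_fw4_msg  (type == FW_TYPE_RSSCPL)
 *      rsp[2]  inner struct rss_header carrying the real opcode
 *      rsp[3]  the encapsulated CPL, e.g. CPL_SGE_EGR_UPDATE
 */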

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
         */
        if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
            ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
                rsp += 2;

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));

        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}
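
/* For example (illustrative): a port with rss_size 4 and @queues = {0, 1, 0,
 * 1} ends up with RSS slots alternating between the absolute response queue
 * IDs of its first two Rx queues, so flows hash across just those two queues.
 */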

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
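
/* The resulting MSI-X vector usage (a sketch, mirroring name_msix_vecs() and
 * request_msix_queue_irqs() above):
 *
 *      vec 0    non-data interrupts (t4_nondata_intr)
 *      vec 1    firmware event queue
 *      vec 2+   one per Ethernet Rx queue, then one per offload Rx queue,
 *               then one per RDMA Rx queue
 */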

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
        void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!p)
                p = vzalloc(size);
        return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
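
/* Typical usage of the pair above (a sketch): large, rarely-walked tables get
 * physically contiguous memory when it's available and transparently fall
 * back to vmalloc() space when it isn't:
 *
 *      struct filter_entry *tab = t4_alloc_mem(n * sizeof(*tab));
 *
 *      if (!tab)
 *              return -ENOMEM;
 *      ...
 *      t4_free_mem(tab);
 *
 * t4_free_mem() uses is_vmalloc_addr() to pick the matching release path.
 */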

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int ftid;

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = t4_l2t_alloc_switching(adapter->l2t);
                if (f->l2t == NULL)
                        return -EAGAIN;
                if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
                                        f->fs.eport, f->fs.dmac)) {
                        cxgb4_l2t_release(f->l2t);
                        f->l2t = NULL;
                        return -ENOMEM;
                }
        }

        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
        memset(fwr, 0, sizeof(*fwr));

        /* It would be nice to put most of the following in t4_hw.c but most
         * of the work is translating the cxgbtool ch_filter_specification
         * into the Work Request and the definition of that structure is
         * currently in cxgbtool.h which isn't appropriate to pull into the
         * common code.  We may eventually try to come up with a more neutral
         * filter specification structure but for now it's easiest to simply
         * put this fairly direct code in line ...
         */
        fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
        fwr->tid_to_iq =
                htonl(V_FW_FILTER_WR_TID(ftid) |
                      V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                      V_FW_FILTER_WR_NOREPLY(0) |
                      V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
                      V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                      V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                      V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
                      V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
                      V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                      V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                      V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                      V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
                                             f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
                                            f->fs.newvlan == VLAN_REWRITE) |
                      V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                      V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                      V_FW_FILTER_WR_PRIO(f->fs.prio) |
                      V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = htons(f->fs.val.ethtype);
        fwr->ethtypem = htons(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
                 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
                 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                htons(V_FW_FILTER_WR_RX_CHAN(0) |
                      V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                      V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                      V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
                      V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
                      V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                      V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
                      V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
                      V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = htons(f->fs.val.ivlan);
        fwr->ivlanm = htons(f->fs.mask.ivlan);
        fwr->ovlan = htons(f->fs.val.ovlan);
        fwr->ovlanm = htons(f->fs.mask.ovlan);
        memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = htons(f->fs.val.lport);
        fwr->lpm = htons(f->fs.mask.lport);
        fwr->fp = htons(f->fs.val.fport);
        fwr->fpm = htons(f->fs.mask.fport);
        if (f->fs.newsmac)
                memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
        t4_ofld_send(adapter, skb);
        return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct sk_buff *skb;
        struct fw_filter_wr *fwr;
        unsigned int len, ftid;

        len = sizeof(*fwr);
        ftid = adapter->tids.ftid_base + fidx;

        skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        fwr = (struct fw_filter_wr *)__skb_put(skb, len);
        t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

        /* Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(adapter, skb);
        return 0;
}

static inline int is_offload(const struct adapter *adap)
{
        return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
        return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
        netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
        "TxOctetsOK         ",
        "TxFramesOK         ",
        "TxBroadcastFrames  ",
        "TxMulticastFrames  ",
        "TxUnicastFrames    ",
        "TxErrorFrames      ",

        "TxFrames64         ",
        "TxFrames65To127    ",
        "TxFrames128To255   ",
        "TxFrames256To511   ",
        "TxFrames512To1023  ",
        "TxFrames1024To1518 ",
        "TxFrames1519ToMax  ",

        "TxFramesDropped    ",
        "TxPauseFrames      ",
        "TxPPP0Frames       ",
        "TxPPP1Frames       ",
        "TxPPP2Frames       ",
        "TxPPP3Frames       ",
        "TxPPP4Frames       ",
        "TxPPP5Frames       ",
        "TxPPP6Frames       ",
        "TxPPP7Frames       ",

        "RxOctetsOK         ",
        "RxFramesOK         ",
        "RxBroadcastFrames  ",
        "RxMulticastFrames  ",
        "RxUnicastFrames    ",

        "RxFramesTooLong    ",
        "RxJabberErrors     ",
        "RxFCSErrors        ",
        "RxLengthErrors     ",
        "RxSymbolErrors     ",
        "RxRuntFrames       ",

        "RxFrames64         ",
        "RxFrames65To127    ",
        "RxFrames128To255   ",
        "RxFrames256To511   ",
        "RxFrames512To1023  ",
        "RxFrames1024To1518 ",
        "RxFrames1519ToMax  ",

        "RxPauseFrames      ",
        "RxPPP0Frames       ",
        "RxPPP1Frames       ",
        "RxPPP2Frames       ",
        "RxPPP3Frames       ",
        "RxPPP4Frames       ",
        "RxPPP5Frames       ",
        "RxPPP6Frames       ",
        "RxPPP7Frames       ",

        "RxBG0FramesDropped ",
        "RxBG1FramesDropped ",
        "RxBG2FramesDropped ",
        "RxBG3FramesDropped ",
        "RxBG0FramesTrunc   ",
        "RxBG1FramesTrunc   ",
        "RxBG2FramesTrunc   ",
        "RxBG3FramesTrunc   ",

        "TSO                ",
        "TxCsumOffload      ",
        "RxCsumGood         ",
        "VLANextractions    ",
        "VLANinsertions     ",
        "GROpackets         ",
        "GROmerged          ",
        "WriteCoalSuccess   ",
        "WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(stats_strings);
        default:
                return -EOPNOTSUPP;
        }
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
        struct adapter *adap = netdev2adap(dev);

        if (is_t4(adap->params.chip))
                return T4_REGMAP_SIZE;
        else
                return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
        return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct adapter *adapter = netdev2adap(dev);

        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, DRV_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));

        if (adapter->params.fw_vers)
                snprintf(info->fw_version, sizeof(info->fw_version),
                        "%u.%u.%u.%u, TP %u.%u.%u.%u",
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
                        FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
                        FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        if (stringset == ETH_SS_STATS)
                memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port.  They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
        u64 tso;
        u64 tx_csum;
        u64 rx_csum;
        u64 vlan_ex;
        u64 vlan_ins;
        u64 gro_pkts;
        u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
                const struct port_info *p, struct queue_port_stats *s)
{
        int i;
        const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
        const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

        memset(s, 0, sizeof(*s));
        for (i = 0; i < p->nqsets; i++, rx++, tx++) {
                s->tso += tx->tso;
                s->tx_csum += tx->tx_cso;
                s->rx_csum += rx->stats.rx_cso;
                s->vlan_ex += rx->stats.vlan_ex;
                s->vlan_ins += tx->vlan_ins;
                s->gro_pkts += rx->stats.lro_pkts;
                s->gro_merged += rx->stats.lro_merged;
        }
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
                      u64 *data)
{
        struct port_info *pi = netdev_priv(dev);
        struct adapter *adapter = pi->adapter;
        u32 val1, val2;

        t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

        data += sizeof(struct port_stats) / sizeof(u64);
        collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
        data += sizeof(struct queue_port_stats) / sizeof(u64);
        if (!is_t4(adapter->params.chip)) {
                t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
                val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
                val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
                *data = val1 - val2;
                data++;
                *data = val2;
                data++;
        } else {
                memset(data, 0, 2 * sizeof(u64));
1446                 data += 2;      /* skip the two zeroed T5-only slots */
1447         }
1448 }
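
/*
 * Editor's note (not in the original source): the u64 buffer handed in by
 * the ethtool core is filled positionally, so its layout must mirror
 * stats_strings exactly: first the firmware port stats (struct port_stats),
 * then the per-queue software stats (struct queue_port_stats), then the two
 * write-coalescing counters, which exist only on T5 and are reported as
 * zero on T4.  Reordering stats_strings without reordering this function
 * would silently mislabel every counter shown by "ethtool -S".
 */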
1449
1450 /*
1451  * Return a version number to identify the type of adapter.  The scheme is:
1452  * - bits 0..9: chip version
1453  * - bits 10..15: chip revision
1454  * - bits 16..23: register dump version
1455  */
1456 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1457 {
1458         return CHELSIO_CHIP_VERSION(ap->params.chip) |
1459                 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1460 }
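
/*
 * Editor's note: worked example of the version word above, assuming
 * CHELSIO_CHIP_VERSION() yields 4 for a T4 part (the actual encoding lives
 * in cxgb4.h):
 *
 *   T4, revision 2:  4 | (2 << 10) | (1 << 16) = 0x00010804
 *
 * i.e. bits 0..9 = 4 (chip), bits 10..15 = 2 (rev), bits 16..23 = 1
 * (register dump format version 1).
 */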
1461
1462 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1463                            unsigned int end)
1464 {
1465         u32 *p = buf + start;
1466
1467         for ( ; start <= end; start += sizeof(u32))
1468                 *p++ = t4_read_reg(ap, start);
1469 }
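
/*
 * Editor's note: the dump is positional -- each register lands at its own
 * address used as a byte offset into the caller's buffer.  For example the
 * range pair (0x1008, 0x1108) fills bytes [0x1008, 0x110c) of buf with
 * 0x41 registers read 4 bytes apart; offsets not covered by any range stay
 * zero because get_regs() below memsets the whole buffer first.
 */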
1470
1471 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1472                      void *buf)
1473 {
1474         static const unsigned int t4_reg_ranges[] = {
1475                 0x1008, 0x1108,
1476                 0x1180, 0x11b4,
1477                 0x11fc, 0x123c,
1478                 0x1300, 0x173c,
1479                 0x1800, 0x18fc,
1480                 0x3000, 0x30d8,
1481                 0x30e0, 0x5924,
1482                 0x5960, 0x59d4,
1483                 0x5a00, 0x5af8,
1484                 0x6000, 0x6098,
1485                 0x6100, 0x6150,
1486                 0x6200, 0x6208,
1487                 0x6240, 0x6248,
1488                 0x6280, 0x6338,
1489                 0x6370, 0x638c,
1490                 0x6400, 0x643c,
1491                 0x6500, 0x6524,
1492                 0x6a00, 0x6a38,
1493                 0x6a60, 0x6a78,
1494                 0x6b00, 0x6b84,
1495                 0x6bf0, 0x6c84,
1496                 0x6cf0, 0x6d84,
1497                 0x6df0, 0x6e84,
1498                 0x6ef0, 0x6f84,
1499                 0x6ff0, 0x7084,
1500                 0x70f0, 0x7184,
1501                 0x71f0, 0x7284,
1502                 0x72f0, 0x7384,
1503                 0x73f0, 0x7450,
1504                 0x7500, 0x7530,
1505                 0x7600, 0x761c,
1506                 0x7680, 0x76cc,
1507                 0x7700, 0x7798,
1508                 0x77c0, 0x77fc,
1509                 0x7900, 0x79fc,
1510                 0x7b00, 0x7c38,
1511                 0x7d00, 0x7efc,
1512                 0x8dc0, 0x8e1c,
1513                 0x8e30, 0x8e78,
1514                 0x8ea0, 0x8f6c,
1515                 0x8fc0, 0x9074,
1516                 0x90fc, 0x90fc,
1517                 0x9400, 0x9458,
1518                 0x9600, 0x96bc,
1519                 0x9800, 0x9808,
1520                 0x9820, 0x983c,
1521                 0x9850, 0x9864,
1522                 0x9c00, 0x9c6c,
1523                 0x9c80, 0x9cec,
1524                 0x9d00, 0x9d6c,
1525                 0x9d80, 0x9dec,
1526                 0x9e00, 0x9e6c,
1527                 0x9e80, 0x9eec,
1528                 0x9f00, 0x9f6c,
1529                 0x9f80, 0x9fec,
1530                 0xd004, 0xd03c,
1531                 0xdfc0, 0xdfe0,
1532                 0xe000, 0xea7c,
1533                 0xf000, 0x11190,
1534                 0x19040, 0x1906c,
1535                 0x19078, 0x19080,
1536                 0x1908c, 0x19124,
1537                 0x19150, 0x191b0,
1538                 0x191d0, 0x191e8,
1539                 0x19238, 0x1924c,
1540                 0x193f8, 0x19474,
1541                 0x19490, 0x194f8,
1542                 0x19800, 0x19f30,
1543                 0x1a000, 0x1a06c,
1544                 0x1a0b0, 0x1a120,
1545                 0x1a128, 0x1a138,
1546                 0x1a190, 0x1a1c4,
1547                 0x1a1fc, 0x1a1fc,
1548                 0x1e040, 0x1e04c,
1549                 0x1e284, 0x1e28c,
1550                 0x1e2c0, 0x1e2c0,
1551                 0x1e2e0, 0x1e2e0,
1552                 0x1e300, 0x1e384,
1553                 0x1e3c0, 0x1e3c8,
1554                 0x1e440, 0x1e44c,
1555                 0x1e684, 0x1e68c,
1556                 0x1e6c0, 0x1e6c0,
1557                 0x1e6e0, 0x1e6e0,
1558                 0x1e700, 0x1e784,
1559                 0x1e7c0, 0x1e7c8,
1560                 0x1e840, 0x1e84c,
1561                 0x1ea84, 0x1ea8c,
1562                 0x1eac0, 0x1eac0,
1563                 0x1eae0, 0x1eae0,
1564                 0x1eb00, 0x1eb84,
1565                 0x1ebc0, 0x1ebc8,
1566                 0x1ec40, 0x1ec4c,
1567                 0x1ee84, 0x1ee8c,
1568                 0x1eec0, 0x1eec0,
1569                 0x1eee0, 0x1eee0,
1570                 0x1ef00, 0x1ef84,
1571                 0x1efc0, 0x1efc8,
1572                 0x1f040, 0x1f04c,
1573                 0x1f284, 0x1f28c,
1574                 0x1f2c0, 0x1f2c0,
1575                 0x1f2e0, 0x1f2e0,
1576                 0x1f300, 0x1f384,
1577                 0x1f3c0, 0x1f3c8,
1578                 0x1f440, 0x1f44c,
1579                 0x1f684, 0x1f68c,
1580                 0x1f6c0, 0x1f6c0,
1581                 0x1f6e0, 0x1f6e0,
1582                 0x1f700, 0x1f784,
1583                 0x1f7c0, 0x1f7c8,
1584                 0x1f840, 0x1f84c,
1585                 0x1fa84, 0x1fa8c,
1586                 0x1fac0, 0x1fac0,
1587                 0x1fae0, 0x1fae0,
1588                 0x1fb00, 0x1fb84,
1589                 0x1fbc0, 0x1fbc8,
1590                 0x1fc40, 0x1fc4c,
1591                 0x1fe84, 0x1fe8c,
1592                 0x1fec0, 0x1fec0,
1593                 0x1fee0, 0x1fee0,
1594                 0x1ff00, 0x1ff84,
1595                 0x1ffc0, 0x1ffc8,
1596                 0x20000, 0x2002c,
1597                 0x20100, 0x2013c,
1598                 0x20190, 0x201c8,
1599                 0x20200, 0x20318,
1600                 0x20400, 0x20528,
1601                 0x20540, 0x20614,
1602                 0x21000, 0x21040,
1603                 0x2104c, 0x21060,
1604                 0x210c0, 0x210ec,
1605                 0x21200, 0x21268,
1606                 0x21270, 0x21284,
1607                 0x212fc, 0x21388,
1608                 0x21400, 0x21404,
1609                 0x21500, 0x21518,
1610                 0x2152c, 0x2153c,
1611                 0x21550, 0x21554,
1612                 0x21600, 0x21600,
1613                 0x21608, 0x21628,
1614                 0x21630, 0x2163c,
1615                 0x21700, 0x2171c,
1616                 0x21780, 0x2178c,
1617                 0x21800, 0x21c38,
1618                 0x21c80, 0x21d7c,
1619                 0x21e00, 0x21e04,
1620                 0x22000, 0x2202c,
1621                 0x22100, 0x2213c,
1622                 0x22190, 0x221c8,
1623                 0x22200, 0x22318,
1624                 0x22400, 0x22528,
1625                 0x22540, 0x22614,
1626                 0x23000, 0x23040,
1627                 0x2304c, 0x23060,
1628                 0x230c0, 0x230ec,
1629                 0x23200, 0x23268,
1630                 0x23270, 0x23284,
1631                 0x232fc, 0x23388,
1632                 0x23400, 0x23404,
1633                 0x23500, 0x23518,
1634                 0x2352c, 0x2353c,
1635                 0x23550, 0x23554,
1636                 0x23600, 0x23600,
1637                 0x23608, 0x23628,
1638                 0x23630, 0x2363c,
1639                 0x23700, 0x2371c,
1640                 0x23780, 0x2378c,
1641                 0x23800, 0x23c38,
1642                 0x23c80, 0x23d7c,
1643                 0x23e00, 0x23e04,
1644                 0x24000, 0x2402c,
1645                 0x24100, 0x2413c,
1646                 0x24190, 0x241c8,
1647                 0x24200, 0x24318,
1648                 0x24400, 0x24528,
1649                 0x24540, 0x24614,
1650                 0x25000, 0x25040,
1651                 0x2504c, 0x25060,
1652                 0x250c0, 0x250ec,
1653                 0x25200, 0x25268,
1654                 0x25270, 0x25284,
1655                 0x252fc, 0x25388,
1656                 0x25400, 0x25404,
1657                 0x25500, 0x25518,
1658                 0x2552c, 0x2553c,
1659                 0x25550, 0x25554,
1660                 0x25600, 0x25600,
1661                 0x25608, 0x25628,
1662                 0x25630, 0x2563c,
1663                 0x25700, 0x2571c,
1664                 0x25780, 0x2578c,
1665                 0x25800, 0x25c38,
1666                 0x25c80, 0x25d7c,
1667                 0x25e00, 0x25e04,
1668                 0x26000, 0x2602c,
1669                 0x26100, 0x2613c,
1670                 0x26190, 0x261c8,
1671                 0x26200, 0x26318,
1672                 0x26400, 0x26528,
1673                 0x26540, 0x26614,
1674                 0x27000, 0x27040,
1675                 0x2704c, 0x27060,
1676                 0x270c0, 0x270ec,
1677                 0x27200, 0x27268,
1678                 0x27270, 0x27284,
1679                 0x272fc, 0x27388,
1680                 0x27400, 0x27404,
1681                 0x27500, 0x27518,
1682                 0x2752c, 0x2753c,
1683                 0x27550, 0x27554,
1684                 0x27600, 0x27600,
1685                 0x27608, 0x27628,
1686                 0x27630, 0x2763c,
1687                 0x27700, 0x2771c,
1688                 0x27780, 0x2778c,
1689                 0x27800, 0x27c38,
1690                 0x27c80, 0x27d7c,
1691                 0x27e00, 0x27e04
1692         };
1693
1694         static const unsigned int t5_reg_ranges[] = {
1695                 0x1008, 0x1148,
1696                 0x1180, 0x11b4,
1697                 0x11fc, 0x123c,
1698                 0x1280, 0x173c,
1699                 0x1800, 0x18fc,
1700                 0x3000, 0x3028,
1701                 0x3060, 0x30d8,
1702                 0x30e0, 0x30fc,
1703                 0x3140, 0x357c,
1704                 0x35a8, 0x35cc,
1705                 0x35ec, 0x35ec,
1706                 0x3600, 0x5624,
1707                 0x56cc, 0x575c,
1708                 0x580c, 0x5814,
1709                 0x5890, 0x58bc,
1710                 0x5940, 0x59dc,
1711                 0x59fc, 0x5a18,
1712                 0x5a60, 0x5a9c,
1713                 0x5b9c, 0x5bfc,
1714                 0x6000, 0x6040,
1715                 0x6058, 0x614c,
1716                 0x7700, 0x7798,
1717                 0x77c0, 0x78fc,
1718                 0x7b00, 0x7c54,
1719                 0x7d00, 0x7efc,
1720                 0x8dc0, 0x8de0,
1721                 0x8df8, 0x8e84,
1722                 0x8ea0, 0x8f84,
1723                 0x8fc0, 0x90f8,
1724                 0x9400, 0x9470,
1725                 0x9600, 0x96f4,
1726                 0x9800, 0x9808,
1727                 0x9820, 0x983c,
1728                 0x9850, 0x9864,
1729                 0x9c00, 0x9c6c,
1730                 0x9c80, 0x9cec,
1731                 0x9d00, 0x9d6c,
1732                 0x9d80, 0x9dec,
1733                 0x9e00, 0x9e6c,
1734                 0x9e80, 0x9eec,
1735                 0x9f00, 0x9f6c,
1736                 0x9f80, 0xa020,
1737                 0xd004, 0xd03c,
1738                 0xdfc0, 0xdfe0,
1739                 0xe000, 0x11088,
1740                 0x1109c, 0x1117c,
1741                 0x11190, 0x11204,
1742                 0x19040, 0x1906c,
1743                 0x19078, 0x19080,
1744                 0x1908c, 0x19124,
1745                 0x19150, 0x191b0,
1746                 0x191d0, 0x191e8,
1747                 0x19238, 0x19290,
1748                 0x193f8, 0x19474,
1749                 0x19490, 0x194cc,
1750                 0x194f0, 0x194f8,
1751                 0x19c00, 0x19c60,
1752                 0x19c94, 0x19e10,
1753                 0x19e50, 0x19f34,
1754                 0x19f40, 0x19f50,
1755                 0x19f90, 0x19fe4,
1756                 0x1a000, 0x1a06c,
1757                 0x1a0b0, 0x1a120,
1758                 0x1a128, 0x1a138,
1759                 0x1a190, 0x1a1c4,
1760                 0x1a1fc, 0x1a1fc,
1761                 0x1e008, 0x1e00c,
1762                 0x1e040, 0x1e04c,
1763                 0x1e284, 0x1e290,
1764                 0x1e2c0, 0x1e2c0,
1765                 0x1e2e0, 0x1e2e0,
1766                 0x1e300, 0x1e384,
1767                 0x1e3c0, 0x1e3c8,
1768                 0x1e408, 0x1e40c,
1769                 0x1e440, 0x1e44c,
1770                 0x1e684, 0x1e690,
1771                 0x1e6c0, 0x1e6c0,
1772                 0x1e6e0, 0x1e6e0,
1773                 0x1e700, 0x1e784,
1774                 0x1e7c0, 0x1e7c8,
1775                 0x1e808, 0x1e80c,
1776                 0x1e840, 0x1e84c,
1777                 0x1ea84, 0x1ea90,
1778                 0x1eac0, 0x1eac0,
1779                 0x1eae0, 0x1eae0,
1780                 0x1eb00, 0x1eb84,
1781                 0x1ebc0, 0x1ebc8,
1782                 0x1ec08, 0x1ec0c,
1783                 0x1ec40, 0x1ec4c,
1784                 0x1ee84, 0x1ee90,
1785                 0x1eec0, 0x1eec0,
1786                 0x1eee0, 0x1eee0,
1787                 0x1ef00, 0x1ef84,
1788                 0x1efc0, 0x1efc8,
1789                 0x1f008, 0x1f00c,
1790                 0x1f040, 0x1f04c,
1791                 0x1f284, 0x1f290,
1792                 0x1f2c0, 0x1f2c0,
1793                 0x1f2e0, 0x1f2e0,
1794                 0x1f300, 0x1f384,
1795                 0x1f3c0, 0x1f3c8,
1796                 0x1f408, 0x1f40c,
1797                 0x1f440, 0x1f44c,
1798                 0x1f684, 0x1f690,
1799                 0x1f6c0, 0x1f6c0,
1800                 0x1f6e0, 0x1f6e0,
1801                 0x1f700, 0x1f784,
1802                 0x1f7c0, 0x1f7c8,
1803                 0x1f808, 0x1f80c,
1804                 0x1f840, 0x1f84c,
1805                 0x1fa84, 0x1fa90,
1806                 0x1fac0, 0x1fac0,
1807                 0x1fae0, 0x1fae0,
1808                 0x1fb00, 0x1fb84,
1809                 0x1fbc0, 0x1fbc8,
1810                 0x1fc08, 0x1fc0c,
1811                 0x1fc40, 0x1fc4c,
1812                 0x1fe84, 0x1fe90,
1813                 0x1fec0, 0x1fec0,
1814                 0x1fee0, 0x1fee0,
1815                 0x1ff00, 0x1ff84,
1816                 0x1ffc0, 0x1ffc8,
1817                 0x30000, 0x30030,
1818                 0x30100, 0x30144,
1819                 0x30190, 0x301d0,
1820                 0x30200, 0x30318,
1821                 0x30400, 0x3052c,
1822                 0x30540, 0x3061c,
1823                 0x30800, 0x30834,
1824                 0x308c0, 0x30908,
1825                 0x30910, 0x309ac,
1826                 0x30a00, 0x30a04,
1827                 0x30a0c, 0x30a2c,
1828                 0x30a44, 0x30a50,
1829                 0x30a74, 0x30c24,
1830                 0x30d08, 0x30d14,
1831                 0x30d1c, 0x30d20,
1832                 0x30d3c, 0x30d50,
1833                 0x31200, 0x3120c,
1834                 0x31220, 0x31220,
1835                 0x31240, 0x31240,
1836                 0x31600, 0x31600,
1837                 0x31608, 0x3160c,
1838                 0x31a00, 0x31a1c,
1839                 0x31e04, 0x31e20,
1840                 0x31e38, 0x31e3c,
1841                 0x31e80, 0x31e80,
1842                 0x31e88, 0x31ea8,
1843                 0x31eb0, 0x31eb4,
1844                 0x31ec8, 0x31ed4,
1845                 0x31fb8, 0x32004,
1846                 0x32208, 0x3223c,
1847                 0x32600, 0x32630,
1848                 0x32a00, 0x32abc,
1849                 0x32b00, 0x32b70,
1850                 0x33000, 0x33048,
1851                 0x33060, 0x3309c,
1852                 0x330f0, 0x33148,
1853                 0x33160, 0x3319c,
1854                 0x331f0, 0x332e4,
1855                 0x332f8, 0x333e4,
1856                 0x333f8, 0x33448,
1857                 0x33460, 0x3349c,
1858                 0x334f0, 0x33548,
1859                 0x33560, 0x3359c,
1860                 0x335f0, 0x336e4,
1861                 0x336f8, 0x337e4,
1862                 0x337f8, 0x337fc,
1863                 0x33814, 0x33814,
1864                 0x3382c, 0x3382c,
1865                 0x33880, 0x3388c,
1866                 0x338e8, 0x338ec,
1867                 0x33900, 0x33948,
1868                 0x33960, 0x3399c,
1869                 0x339f0, 0x33ae4,
1870                 0x33af8, 0x33b10,
1871                 0x33b28, 0x33b28,
1872                 0x33b3c, 0x33b50,
1873                 0x33bf0, 0x33c10,
1874                 0x33c28, 0x33c28,
1875                 0x33c3c, 0x33c50,
1876                 0x33cf0, 0x33cfc,
1877                 0x34000, 0x34030,
1878                 0x34100, 0x34144,
1879                 0x34190, 0x341d0,
1880                 0x34200, 0x34318,
1881                 0x34400, 0x3452c,
1882                 0x34540, 0x3461c,
1883                 0x34800, 0x34834,
1884                 0x348c0, 0x34908,
1885                 0x34910, 0x349ac,
1886                 0x34a00, 0x34a04,
1887                 0x34a0c, 0x34a2c,
1888                 0x34a44, 0x34a50,
1889                 0x34a74, 0x34c24,
1890                 0x34d08, 0x34d14,
1891                 0x34d1c, 0x34d20,
1892                 0x34d3c, 0x34d50,
1893                 0x35200, 0x3520c,
1894                 0x35220, 0x35220,
1895                 0x35240, 0x35240,
1896                 0x35600, 0x35600,
1897                 0x35608, 0x3560c,
1898                 0x35a00, 0x35a1c,
1899                 0x35e04, 0x35e20,
1900                 0x35e38, 0x35e3c,
1901                 0x35e80, 0x35e80,
1902                 0x35e88, 0x35ea8,
1903                 0x35eb0, 0x35eb4,
1904                 0x35ec8, 0x35ed4,
1905                 0x35fb8, 0x36004,
1906                 0x36208, 0x3623c,
1907                 0x36600, 0x36630,
1908                 0x36a00, 0x36abc,
1909                 0x36b00, 0x36b70,
1910                 0x37000, 0x37048,
1911                 0x37060, 0x3709c,
1912                 0x370f0, 0x37148,
1913                 0x37160, 0x3719c,
1914                 0x371f0, 0x372e4,
1915                 0x372f8, 0x373e4,
1916                 0x373f8, 0x37448,
1917                 0x37460, 0x3749c,
1918                 0x374f0, 0x37548,
1919                 0x37560, 0x3759c,
1920                 0x375f0, 0x376e4,
1921                 0x376f8, 0x377e4,
1922                 0x377f8, 0x377fc,
1923                 0x37814, 0x37814,
1924                 0x3782c, 0x3782c,
1925                 0x37880, 0x3788c,
1926                 0x378e8, 0x378ec,
1927                 0x37900, 0x37948,
1928                 0x37960, 0x3799c,
1929                 0x379f0, 0x37ae4,
1930                 0x37af8, 0x37b10,
1931                 0x37b28, 0x37b28,
1932                 0x37b3c, 0x37b50,
1933                 0x37bf0, 0x37c10,
1934                 0x37c28, 0x37c28,
1935                 0x37c3c, 0x37c50,
1936                 0x37cf0, 0x37cfc,
1937                 0x38000, 0x38030,
1938                 0x38100, 0x38144,
1939                 0x38190, 0x381d0,
1940                 0x38200, 0x38318,
1941                 0x38400, 0x3852c,
1942                 0x38540, 0x3861c,
1943                 0x38800, 0x38834,
1944                 0x388c0, 0x38908,
1945                 0x38910, 0x389ac,
1946                 0x38a00, 0x38a04,
1947                 0x38a0c, 0x38a2c,
1948                 0x38a44, 0x38a50,
1949                 0x38a74, 0x38c24,
1950                 0x38d08, 0x38d14,
1951                 0x38d1c, 0x38d20,
1952                 0x38d3c, 0x38d50,
1953                 0x39200, 0x3920c,
1954                 0x39220, 0x39220,
1955                 0x39240, 0x39240,
1956                 0x39600, 0x39600,
1957                 0x39608, 0x3960c,
1958                 0x39a00, 0x39a1c,
1959                 0x39e04, 0x39e20,
1960                 0x39e38, 0x39e3c,
1961                 0x39e80, 0x39e80,
1962                 0x39e88, 0x39ea8,
1963                 0x39eb0, 0x39eb4,
1964                 0x39ec8, 0x39ed4,
1965                 0x39fb8, 0x3a004,
1966                 0x3a208, 0x3a23c,
1967                 0x3a600, 0x3a630,
1968                 0x3aa00, 0x3aabc,
1969                 0x3ab00, 0x3ab70,
1970                 0x3b000, 0x3b048,
1971                 0x3b060, 0x3b09c,
1972                 0x3b0f0, 0x3b148,
1973                 0x3b160, 0x3b19c,
1974                 0x3b1f0, 0x3b2e4,
1975                 0x3b2f8, 0x3b3e4,
1976                 0x3b3f8, 0x3b448,
1977                 0x3b460, 0x3b49c,
1978                 0x3b4f0, 0x3b548,
1979                 0x3b560, 0x3b59c,
1980                 0x3b5f0, 0x3b6e4,
1981                 0x3b6f8, 0x3b7e4,
1982                 0x3b7f8, 0x3b7fc,
1983                 0x3b814, 0x3b814,
1984                 0x3b82c, 0x3b82c,
1985                 0x3b880, 0x3b88c,
1986                 0x3b8e8, 0x3b8ec,
1987                 0x3b900, 0x3b948,
1988                 0x3b960, 0x3b99c,
1989                 0x3b9f0, 0x3bae4,
1990                 0x3baf8, 0x3bb10,
1991                 0x3bb28, 0x3bb28,
1992                 0x3bb3c, 0x3bb50,
1993                 0x3bbf0, 0x3bc10,
1994                 0x3bc28, 0x3bc28,
1995                 0x3bc3c, 0x3bc50,
1996                 0x3bcf0, 0x3bcfc,
1997                 0x3c000, 0x3c030,
1998                 0x3c100, 0x3c144,
1999                 0x3c190, 0x3c1d0,
2000                 0x3c200, 0x3c318,
2001                 0x3c400, 0x3c52c,
2002                 0x3c540, 0x3c61c,
2003                 0x3c800, 0x3c834,
2004                 0x3c8c0, 0x3c908,
2005                 0x3c910, 0x3c9ac,
2006                 0x3ca00, 0x3ca04,
2007                 0x3ca0c, 0x3ca2c,
2008                 0x3ca44, 0x3ca50,
2009                 0x3ca74, 0x3cc24,
2010                 0x3cd08, 0x3cd14,
2011                 0x3cd1c, 0x3cd20,
2012                 0x3cd3c, 0x3cd50,
2013                 0x3d200, 0x3d20c,
2014                 0x3d220, 0x3d220,
2015                 0x3d240, 0x3d240,
2016                 0x3d600, 0x3d600,
2017                 0x3d608, 0x3d60c,
2018                 0x3da00, 0x3da1c,
2019                 0x3de04, 0x3de20,
2020                 0x3de38, 0x3de3c,
2021                 0x3de80, 0x3de80,
2022                 0x3de88, 0x3dea8,
2023                 0x3deb0, 0x3deb4,
2024                 0x3dec8, 0x3ded4,
2025                 0x3dfb8, 0x3e004,
2026                 0x3e208, 0x3e23c,
2027                 0x3e600, 0x3e630,
2028                 0x3ea00, 0x3eabc,
2029                 0x3eb00, 0x3eb70,
2030                 0x3f000, 0x3f048,
2031                 0x3f060, 0x3f09c,
2032                 0x3f0f0, 0x3f148,
2033                 0x3f160, 0x3f19c,
2034                 0x3f1f0, 0x3f2e4,
2035                 0x3f2f8, 0x3f3e4,
2036                 0x3f3f8, 0x3f448,
2037                 0x3f460, 0x3f49c,
2038                 0x3f4f0, 0x3f548,
2039                 0x3f560, 0x3f59c,
2040                 0x3f5f0, 0x3f6e4,
2041                 0x3f6f8, 0x3f7e4,
2042                 0x3f7f8, 0x3f7fc,
2043                 0x3f814, 0x3f814,
2044                 0x3f82c, 0x3f82c,
2045                 0x3f880, 0x3f88c,
2046                 0x3f8e8, 0x3f8ec,
2047                 0x3f900, 0x3f948,
2048                 0x3f960, 0x3f99c,
2049                 0x3f9f0, 0x3fae4,
2050                 0x3faf8, 0x3fb10,
2051                 0x3fb28, 0x3fb28,
2052                 0x3fb3c, 0x3fb50,
2053                 0x3fbf0, 0x3fc10,
2054                 0x3fc28, 0x3fc28,
2055                 0x3fc3c, 0x3fc50,
2056                 0x3fcf0, 0x3fcfc,
2057                 0x40000, 0x4000c,
2058                 0x40040, 0x40068,
2059                 0x40080, 0x40144,
2060                 0x40180, 0x4018c,
2061                 0x40200, 0x40298,
2062                 0x402ac, 0x4033c,
2063                 0x403f8, 0x403fc,
2064                 0x41300, 0x413c4,
2065                 0x41400, 0x4141c,
2066                 0x41480, 0x414d0,
2067                 0x44000, 0x44078,
2068                 0x440c0, 0x44278,
2069                 0x442c0, 0x44478,
2070                 0x444c0, 0x44678,
2071                 0x446c0, 0x44878,
2072                 0x448c0, 0x449fc,
2073                 0x45000, 0x45068,
2074                 0x45080, 0x45084,
2075                 0x450a0, 0x450b0,
2076                 0x45200, 0x45268,
2077                 0x45280, 0x45284,
2078                 0x452a0, 0x452b0,
2079                 0x460c0, 0x460e4,
2080                 0x47000, 0x4708c,
2081                 0x47200, 0x47250,
2082                 0x47400, 0x47420,
2083                 0x47600, 0x47618,
2084                 0x47800, 0x47814,
2085                 0x48000, 0x4800c,
2086                 0x48040, 0x48068,
2087                 0x48080, 0x48144,
2088                 0x48180, 0x4818c,
2089                 0x48200, 0x48298,
2090                 0x482ac, 0x4833c,
2091                 0x483f8, 0x483fc,
2092                 0x49300, 0x493c4,
2093                 0x49400, 0x4941c,
2094                 0x49480, 0x494d0,
2095                 0x4c000, 0x4c078,
2096                 0x4c0c0, 0x4c278,
2097                 0x4c2c0, 0x4c478,
2098                 0x4c4c0, 0x4c678,
2099                 0x4c6c0, 0x4c878,
2100                 0x4c8c0, 0x4c9fc,
2101                 0x4d000, 0x4d068,
2102                 0x4d080, 0x4d084,
2103                 0x4d0a0, 0x4d0b0,
2104                 0x4d200, 0x4d268,
2105                 0x4d280, 0x4d284,
2106                 0x4d2a0, 0x4d2b0,
2107                 0x4e0c0, 0x4e0e4,
2108                 0x4f000, 0x4f08c,
2109                 0x4f200, 0x4f250,
2110                 0x4f400, 0x4f420,
2111                 0x4f600, 0x4f618,
2112                 0x4f800, 0x4f814,
2113                 0x50000, 0x500cc,
2114                 0x50400, 0x50400,
2115                 0x50800, 0x508cc,
2116                 0x50c00, 0x50c00,
2117                 0x51000, 0x5101c,
2118                 0x51300, 0x51308,
2119         };
2120
2121         int i;
2122         struct adapter *ap = netdev2adap(dev);
2123         const unsigned int *reg_ranges; /* per call: T4/T5 may coexist */
2124         int arr_size = 0, buf_size = 0;
2125
2126         if (is_t4(ap->params.chip)) {
2127                 reg_ranges = &t4_reg_ranges[0];
2128                 arr_size = ARRAY_SIZE(t4_reg_ranges);
2129                 buf_size = T4_REGMAP_SIZE;
2130         } else {
2131                 reg_ranges = &t5_reg_ranges[0];
2132                 arr_size = ARRAY_SIZE(t5_reg_ranges);
2133                 buf_size = T5_REGMAP_SIZE;
2134         }
2135
2136         regs->version = mk_adap_vers(ap);
2137
2138         memset(buf, 0, buf_size);
2139         for (i = 0; i < arr_size; i += 2)
2140                 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2141 }
2142
2143 static int restart_autoneg(struct net_device *dev)
2144 {
2145         struct port_info *p = netdev_priv(dev);
2146
2147         if (!netif_running(dev))
2148                 return -EAGAIN;
2149         if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2150                 return -EINVAL;
2151         t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2152         return 0;
2153 }
2154
2155 static int identify_port(struct net_device *dev,
2156                          enum ethtool_phys_id_state state)
2157 {
2158         unsigned int val;
2159         struct adapter *adap = netdev2adap(dev);
2160
2161         if (state == ETHTOOL_ID_ACTIVE)
2162                 val = 0xffff;
2163         else if (state == ETHTOOL_ID_INACTIVE)
2164                 val = 0;
2165         else
2166                 return -EINVAL;
2167
2168         return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2169 }
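
/*
 * Editor's note: this backs "ethtool -p" (the set_phys_id hook).  Only the
 * on/off states are handled -- 0xffff is passed to the firmware to start
 * the port LED identification blink and 0 to stop it -- so the core's
 * frequency-toggling states fall through to -EINVAL.
 */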
2170
2171 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2172 {
2173         unsigned int v = 0;
2174
2175         if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2176             type == FW_PORT_TYPE_BT_XAUI) {
2177                 v |= SUPPORTED_TP;
2178                 if (caps & FW_PORT_CAP_SPEED_100M)
2179                         v |= SUPPORTED_100baseT_Full;
2180                 if (caps & FW_PORT_CAP_SPEED_1G)
2181                         v |= SUPPORTED_1000baseT_Full;
2182                 if (caps & FW_PORT_CAP_SPEED_10G)
2183                         v |= SUPPORTED_10000baseT_Full;
2184         } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2185                 v |= SUPPORTED_Backplane;
2186                 if (caps & FW_PORT_CAP_SPEED_1G)
2187                         v |= SUPPORTED_1000baseKX_Full;
2188                 if (caps & FW_PORT_CAP_SPEED_10G)
2189                         v |= SUPPORTED_10000baseKX4_Full;
2190         } else if (type == FW_PORT_TYPE_KR)
2191                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2192         else if (type == FW_PORT_TYPE_BP_AP)
2193                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2194                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2195         else if (type == FW_PORT_TYPE_BP4_AP)
2196                 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2197                      SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2198                      SUPPORTED_10000baseKX4_Full;
2199         else if (type == FW_PORT_TYPE_FIBER_XFI ||
2200                  type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2201                 v |= SUPPORTED_FIBRE;
2202
2203         if (caps & FW_PORT_CAP_ANEG)
2204                 v |= SUPPORTED_Autoneg;
2205         return v;
2206 }
2207
2208 static unsigned int to_fw_linkcaps(unsigned int caps)
2209 {
2210         unsigned int v = 0;
2211
2212         if (caps & ADVERTISED_100baseT_Full)
2213                 v |= FW_PORT_CAP_SPEED_100M;
2214         if (caps & ADVERTISED_1000baseT_Full)
2215                 v |= FW_PORT_CAP_SPEED_1G;
2216         if (caps & ADVERTISED_10000baseT_Full)
2217                 v |= FW_PORT_CAP_SPEED_10G;
2218         return v;
2219 }
2220
2221 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2222 {
2223         const struct port_info *p = netdev_priv(dev);
2224
2225         if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2226             p->port_type == FW_PORT_TYPE_BT_XFI ||
2227             p->port_type == FW_PORT_TYPE_BT_XAUI)
2228                 cmd->port = PORT_TP;
2229         else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2230                  p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2231                 cmd->port = PORT_FIBRE;
2232         else if (p->port_type == FW_PORT_TYPE_SFP) {
2233                 if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2234                     p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2235                         cmd->port = PORT_DA;
2236                 else
2237                         cmd->port = PORT_FIBRE;
2238         } else
2239                 cmd->port = PORT_OTHER;
2240
2241         if (p->mdio_addr >= 0) {
2242                 cmd->phy_address = p->mdio_addr;
2243                 cmd->transceiver = XCVR_EXTERNAL;
2244                 cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2245                         MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2246         } else {
2247                 cmd->phy_address = 0;  /* not really, but no better option */
2248                 cmd->transceiver = XCVR_INTERNAL;
2249                 cmd->mdio_support = 0;
2250         }
2251
2252         cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2253         cmd->advertising = from_fw_linkcaps(p->port_type,
2254                                             p->link_cfg.advertising);
2255         ethtool_cmd_speed_set(cmd,
2256                               netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2257         cmd->duplex = DUPLEX_FULL;
2258         cmd->autoneg = p->link_cfg.autoneg;
2259         cmd->maxtxpkt = 0;
2260         cmd->maxrxpkt = 0;
2261         return 0;
2262 }
2263
2264 static unsigned int speed_to_caps(int speed)
2265 {
2266         if (speed == SPEED_100)
2267                 return FW_PORT_CAP_SPEED_100M;
2268         if (speed == SPEED_1000)
2269                 return FW_PORT_CAP_SPEED_1G;
2270         if (speed == SPEED_10000)
2271                 return FW_PORT_CAP_SPEED_10G;
2272         return 0;
2273 }
2274
2275 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2276 {
2277         unsigned int cap;
2278         struct port_info *p = netdev_priv(dev);
2279         struct link_config *lc = &p->link_cfg;
2280         u32 speed = ethtool_cmd_speed(cmd);
2281
2282         if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
2283                 return -EINVAL;
2284
2285         if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2286                 /*
2287                  * PHY offers a single speed.  See if that's what's
2288                  * being requested.
2289                  */
2290                 if (cmd->autoneg == AUTONEG_DISABLE &&
2291                     (lc->supported & speed_to_caps(speed)))
2292                         return 0;
2293                 return -EINVAL;
2294         }
2295
2296         if (cmd->autoneg == AUTONEG_DISABLE) {
2297                 cap = speed_to_caps(speed);
2298
2299                 if (!(lc->supported & cap) || (speed == SPEED_1000) ||
2300                     (speed == SPEED_10000))
2301                         return -EINVAL;
2302                 lc->requested_speed = cap;
2303                 lc->advertising = 0;
2304         } else {
2305                 cap = to_fw_linkcaps(cmd->advertising);
2306                 if (!(lc->supported & cap))
2307                         return -EINVAL;
2308                 lc->requested_speed = 0;
2309                 lc->advertising = cap | FW_PORT_CAP_ANEG;
2310         }
2311         lc->autoneg = cmd->autoneg;
2312
2313         if (netif_running(dev))
2314                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2315                                      lc);
2316         return 0;
2317 }
2318
2319 static void get_pauseparam(struct net_device *dev,
2320                            struct ethtool_pauseparam *epause)
2321 {
2322         struct port_info *p = netdev_priv(dev);
2323
2324         epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2325         epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2326         epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2327 }
2328
2329 static int set_pauseparam(struct net_device *dev,
2330                           struct ethtool_pauseparam *epause)
2331 {
2332         struct port_info *p = netdev_priv(dev);
2333         struct link_config *lc = &p->link_cfg;
2334
2335         if (epause->autoneg == AUTONEG_DISABLE)
2336                 lc->requested_fc = 0;
2337         else if (lc->supported & FW_PORT_CAP_ANEG)
2338                 lc->requested_fc = PAUSE_AUTONEG;
2339         else
2340                 return -EINVAL;
2341
2342         if (epause->rx_pause)
2343                 lc->requested_fc |= PAUSE_RX;
2344         if (epause->tx_pause)
2345                 lc->requested_fc |= PAUSE_TX;
2346         if (netif_running(dev))
2347                 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2348                                      lc);
2349         return 0;
2350 }
2351
2352 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2353 {
2354         const struct port_info *pi = netdev_priv(dev);
2355         const struct sge *s = &pi->adapter->sge;
2356
2357         e->rx_max_pending = MAX_RX_BUFFERS;
2358         e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2359         e->rx_jumbo_max_pending = 0;
2360         e->tx_max_pending = MAX_TXQ_ENTRIES;
2361
2362         e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2363         e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2364         e->rx_jumbo_pending = 0;
2365         e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2366 }
2367
2368 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2369 {
2370         int i;
2371         const struct port_info *pi = netdev_priv(dev);
2372         struct adapter *adapter = pi->adapter;
2373         struct sge *s = &adapter->sge;
2374
2375         if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2376             e->tx_pending > MAX_TXQ_ENTRIES ||
2377             e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2378             e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2379             e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2380                 return -EINVAL;
2381
2382         if (adapter->flags & FULL_INIT_DONE)
2383                 return -EBUSY;
2384
2385         for (i = 0; i < pi->nqsets; ++i) {
2386                 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2387                 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2388                 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2389         }
2390         return 0;
2391 }
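
/*
 * Editor's note: this is the "ethtool -G" (ringparam) hook.  The +8 here
 * and the -8 in get_sge_param() above hide what appear to be 8 reserved
 * free-list entries from userspace, and resizing is refused with -EBUSY
 * once the queues have been created (FULL_INIT_DONE), since the sizes are
 * only consumed at queue-allocation time.
 */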
2392
2393 static int closest_timer(const struct sge *s, int time)
2394 {
2395         int i, delta, match = 0, min_delta = INT_MAX;
2396
2397         for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2398                 delta = time - s->timer_val[i];
2399                 if (delta < 0)
2400                         delta = -delta;
2401                 if (delta < min_delta) {
2402                         min_delta = delta;
2403                         match = i;
2404                 }
2405         }
2406         return match;
2407 }
2408
2409 static int closest_thres(const struct sge *s, int thres)
2410 {
2411         int i, delta, match = 0, min_delta = INT_MAX;
2412
2413         for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2414                 delta = thres - s->counter_val[i];
2415                 if (delta < 0)
2416                         delta = -delta;
2417                 if (delta < min_delta) {
2418                         min_delta = delta;
2419                         match = i;
2420                 }
2421         }
2422         return match;
2423 }
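
/*
 * Editor's note: closest_timer() and closest_thres() are the same
 * nearest-match scan over different tables.  Worked example with a
 * hypothetical timer_val[] of {5, 10, 20, 50, 100, 200} (the real table is
 * programmed at init time): closest_timer(s, 8) returns index 1, because
 * |8 - 10| = 2 beats |8 - 5| = 3; ties keep the earlier entry, since only
 * a strictly smaller delta replaces the current match.
 */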
2424
2425 /*
2426  * Return a queue's interrupt hold-off time in us.  0 means no timer.
2427  */
2428 static unsigned int qtimer_val(const struct adapter *adap,
2429                                const struct sge_rspq *q)
2430 {
2431         unsigned int idx = q->intr_params >> 1;
2432
2433         return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2434 }
2435
2436 /**
2437  *      set_rxq_intr_params - set a queue's interrupt holdoff parameters
2438  *      @adap: the adapter
2439  *      @q: the Rx queue
2440  *      @us: the hold-off time in us, or 0 to disable timer
2441  *      @cnt: the hold-off packet count, or 0 to disable counter
2442  *
2443  *      Sets an Rx queue's interrupt hold-off time and packet count.  At least
2444  *      one of the two needs to be enabled for the queue to generate interrupts.
2445  */
2446 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
2447                                unsigned int us, unsigned int cnt)
2448 {
2449         if ((us | cnt) == 0)
2450                 cnt = 1;
2451
2452         if (cnt) {
2453                 int err;
2454                 u32 v, new_idx;
2455
2456                 new_idx = closest_thres(&adap->sge, cnt);
2457                 if (q->desc && q->pktcnt_idx != new_idx) {
2458                         /* the queue has already been created, update it */
2459                         v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2460                             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2461                             FW_PARAMS_PARAM_YZ(q->cntxt_id);
2462                         err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2463                                             &new_idx);
2464                         if (err)
2465                                 return err;
2466                 }
2467                 q->pktcnt_idx = new_idx;
2468         }
2469
2470         us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2471         q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2472         return 0;
2473 }
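
/*
 * Editor's note: intr_params appears to pack the timer index above bit 0
 * and the counter-enable flag in bit 0 -- qtimer_val() above recovers the
 * index with ">> 1".  With the hypothetical timer table from the previous
 * note, set_rxq_intr_params(adap, q, 50, 8) would store timer index 3
 * (timer_val[3] == 50) and set QINTR_CNT_EN with pktcnt_idx pointing at
 * the counter_val[] entry nearest 8; if the queue already exists, the new
 * threshold index is pushed to the firmware through the FW_PARAMS_MNEM_DMAQ
 * parameter first.  Passing us == 0 selects index 6, apparently outside the
 * timer table so qtimer_val() reports "no timer", and us == 0 && cnt == 0
 * quietly re-enables the counter with cnt = 1 so the queue can still raise
 * interrupts.
 */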
2474
2475 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2476 {
2477         const struct port_info *pi = netdev_priv(dev);
2478         struct adapter *adap = pi->adapter;
2479         struct sge_rspq *q;
2480         int i;
2481         int r = 0;
2482
2483         for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
2484                 q = &adap->sge.ethrxq[i].rspq;
2485                 r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
2486                         c->rx_max_coalesced_frames);
2487                 if (r) {
2488                         dev_err(&dev->dev, "failed to set coalesce %d\n", r);
2489                         break;
2490                 }
2491         }
2492         return r;
2493 }
2494
2495 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2496 {
2497         const struct port_info *pi = netdev_priv(dev);
2498         const struct adapter *adap = pi->adapter;
2499         const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2500
2501         c->rx_coalesce_usecs = qtimer_val(adap, rq);
2502         c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2503                 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2504         return 0;
2505 }
2506
2507 /**
2508  *      eeprom_ptov - translate a physical EEPROM address to virtual
2509  *      @phys_addr: the physical EEPROM address
2510  *      @fn: the PCI function number
2511  *      @sz: size of function-specific area
2512  *
2513  *      Translate a physical EEPROM address to virtual.  The first 1K is
2514  *      accessed through virtual addresses starting at 31K; the rest is
2515  *      accessed through virtual addresses starting at 0.
2516  *
2517  *      The mapping is as follows:
2518  *      [0..1K) -> [31K..32K)
2519  *      [1K..1K+A) -> [31K-A..31K)
2520  *      [1K+A..ES) -> [0..ES-A-1K)
2521  *
2522  *      where A = @fn * @sz, and ES = EEPROM size.
2523  */
2524 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2525 {
2526         fn *= sz;
2527         if (phys_addr < 1024)
2528                 return phys_addr + (31 << 10);
2529         if (phys_addr < 1024 + fn)
2530                 return 31744 - fn + phys_addr - 1024;
2531         if (phys_addr < EEPROMSIZE)
2532                 return phys_addr - 1024 - fn;
2533         return -EINVAL;
2534 }
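
/*
 * Editor's note: worked example of the mapping above, for fn = 1 and
 * assuming sz = EEPROMPFSIZE = 1024 (so A = 1024):
 *
 *   eeprom_ptov(0, 1, 1024)    -> 31744   start of the shared first 1K
 *   eeprom_ptov(1500, 1, 1024) -> 31196   inside this function's window
 *   eeprom_ptov(4096, 1, 1024) -> 2048    rest of the EEPROM, rebased to 0
 *
 * Anything at or beyond EEPROMSIZE fails with -EINVAL.
 */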
2535
2536 /*
2537  * The next two routines implement EEPROM read/write from physical addresses.
2538  */
2539 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2540 {
2541         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2542
2543         if (vaddr >= 0)
2544                 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2545         return vaddr < 0 ? vaddr : 0;
2546 }
2547
2548 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2549 {
2550         int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2551
2552         if (vaddr >= 0)
2553                 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2554         return vaddr < 0 ? vaddr : 0;
2555 }
2556
2557 #define EEPROM_MAGIC 0x38E2F10C
2558
2559 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2560                       u8 *data)
2561 {
2562         int i, err = 0;
2563         struct adapter *adapter = netdev2adap(dev);
2564
2565         u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2566         if (!buf)
2567                 return -ENOMEM;
2568
2569         e->magic = EEPROM_MAGIC;
2570         for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2571                 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2572
2573         if (!err)
2574                 memcpy(data, buf + e->offset, e->len);
2575         kfree(buf);
2576         return err;
2577 }
2578
2579 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2580                       u8 *data)
2581 {
2582         u8 *buf;
2583         int err = 0;
2584         u32 aligned_offset, aligned_len, *p;
2585         struct adapter *adapter = netdev2adap(dev);
2586
2587         if (eeprom->magic != EEPROM_MAGIC)
2588                 return -EINVAL;
2589
2590         aligned_offset = eeprom->offset & ~3;
2591         aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2592
2593         if (adapter->fn > 0) {
2594                 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2595
2596                 if (aligned_offset < start ||
2597                     aligned_offset + aligned_len > start + EEPROMPFSIZE)
2598                         return -EPERM;
2599         }
2600
2601         if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2602                 /*
2603                  * RMW possibly needed for first or last words.
2604                  */
2605                 buf = kmalloc(aligned_len, GFP_KERNEL);
2606                 if (!buf)
2607                         return -ENOMEM;
2608                 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2609                 if (!err && aligned_len > 4)
2610                         err = eeprom_rd_phys(adapter,
2611                                              aligned_offset + aligned_len - 4,
2612                                              (u32 *)&buf[aligned_len - 4]);
2613                 if (err)
2614                         goto out;
2615                 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2616         } else
2617                 buf = data;
2618
2619         err = t4_seeprom_wp(adapter, false);
2620         if (err)
2621                 goto out;
2622
2623         for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2624                 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2625                 aligned_offset += 4;
2626         }
2627
2628         if (!err)
2629                 err = t4_seeprom_wp(adapter, true);
2630 out:
2631         if (buf != data)
2632                 kfree(buf);
2633         return err;
2634 }
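
/*
 * Editor's note: worked example of the read-modify-write path above.  A
 * request with offset = 6, len = 3 is widened to aligned_offset = 4 and
 * aligned_len = (3 + 2 + 3) & ~3 = 8, so both surrounding words are read
 * back, the three payload bytes are spliced in at buf + 2, and two full
 * words are written out, with the EEPROM write-protect dropped around the
 * update and restored afterwards.
 */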
2635
2636 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2637 {
2638         int ret;
2639         const struct firmware *fw;
2640         struct adapter *adap = netdev2adap(netdev);
2641
2642         ef->data[sizeof(ef->data) - 1] = '\0';
2643         ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2644         if (ret < 0)
2645                 return ret;
2646
2647         ret = t4_load_fw(adap, fw->data, fw->size);
2648         release_firmware(fw);
2649         if (!ret)
2650                 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
2651         return ret;
2652 }
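
/*
 * Editor's note: this is the "ethtool -f" (flash_device) hook.  The name
 * in ef->data is resolved by request_firmware() against the usual firmware
 * search path (e.g. /lib/firmware), and the NUL termination above guards
 * against an unterminated string arriving from userspace.
 */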
2653
2654 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2655 #define BCAST_CRC 0xa0ccc1a6
2656
2657 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2658 {
2659         wol->supported = WAKE_BCAST | WAKE_MAGIC;
2660         wol->wolopts = netdev2adap(dev)->wol;
2661         memset(&wol->sopass, 0, sizeof(wol->sopass));
2662 }
2663
2664 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2665 {
2666         int err = 0;
2667         struct port_info *pi = netdev_priv(dev);
2668
2669         if (wol->wolopts & ~WOL_SUPPORTED)
2670                 return -EINVAL;
2671         t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2672                             (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2673         if (wol->wolopts & WAKE_BCAST) {
2674                 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2675                                         ~0ULL, 0, false);
2676                 if (!err)
2677                         err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2678                                                 ~6ULL, ~0ULL, BCAST_CRC, true);
2679         } else
2680                 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2681         return err;
2682 }
2683
2684 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2685 {
2686         const struct port_info *pi = netdev_priv(dev);
2687         netdev_features_t changed = dev->features ^ features;
2688         int err;
2689
2690         if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2691                 return 0;
2692
2693         err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2694                             -1, -1, -1,
2695                             !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2696         if (unlikely(err))
2697                 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2698         return err;
2699 }
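
/*
 * Editor's note: only NETIF_F_HW_VLAN_CTAG_RX (the "rxvlan" flag of
 * "ethtool -K") is handled here, by reprogramming the VI's rx mode; on
 * failure the bit is flipped back in dev->features so the stack's view
 * matches what the hardware is actually doing.
 */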
2700
2701 static u32 get_rss_table_size(struct net_device *dev)
2702 {
2703         const struct port_info *pi = netdev_priv(dev);
2704
2705         return pi->rss_size;
2706 }
2707
2708 static int get_rss_table(struct net_device *dev, u32 *p)
2709 {
2710         const struct port_info *pi = netdev_priv(dev);
2711         unsigned int n = pi->rss_size;
2712
2713         while (n--)
2714                 p[n] = pi->rss[n];
2715         return 0;
2716 }
2717
2718 static int set_rss_table(struct net_device *dev, const u32 *p)
2719 {
2720         unsigned int i;
2721         struct port_info *pi = netdev_priv(dev);
2722
2723         for (i = 0; i < pi->rss_size; i++)
2724                 pi->rss[i] = p[i];
2725         if (pi->adapter->flags & FULL_INIT_DONE)
2726                 return write_rss(pi, pi->rss);
2727         return 0;
2728 }
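
/*
 * Editor's note: the three functions above back "ethtool -x"/"ethtool -X"
 * (RSS indirection).  A new table is always cached in pi->rss but only
 * written to hardware immediately when the adapter is fully initialised;
 * otherwise the cached copy is presumably applied when the queues are set
 * up later.
 */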
2729
2730 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2731                      u32 *rules)
2732 {
2733         const struct port_info *pi = netdev_priv(dev);
2734
2735         switch (info->cmd) {
2736         case ETHTOOL_GRXFH: {
2737                 unsigned int v = pi->rss_mode;
2738
2739                 info->data = 0;
2740                 switch (info->flow_type) {
2741                 case TCP_V4_FLOW:
2742                         if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
2743                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2744                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2745                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2746                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2747                         break;
2748                 case UDP_V4_FLOW:
2749                         if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
2750                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2751                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2752                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2753                         else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2754                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2755                         break;
2756                 case SCTP_V4_FLOW:
2757                 case AH_ESP_V4_FLOW:
2758                 case IPV4_FLOW:
2759                         if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
2760                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2761                         break;
2762                 case TCP_V6_FLOW:
2763                         if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
2764                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2765                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2766                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2767                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2768                         break;
2769                 case UDP_V6_FLOW:
2770                         if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
2771                             (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
2772                                 info->data = RXH_IP_SRC | RXH_IP_DST |
2773                                              RXH_L4_B_0_1 | RXH_L4_B_2_3;
2774                         else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2775                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2776                         break;
2777                 case SCTP_V6_FLOW:
2778                 case AH_ESP_V6_FLOW:
2779                 case IPV6_FLOW:
2780                         if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
2781                                 info->data = RXH_IP_SRC | RXH_IP_DST;
2782                         break;
2783                 }
2784                 return 0;
2785         }
2786         case ETHTOOL_GRXRINGS:
2787                 info->data = pi->nqsets;
2788                 return 0;
2789         }
2790         return -EOPNOTSUPP;
2791 }
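
/*
 * Editor's note: this reports the RSS hash fields for "ethtool -n <dev>
 * rx-flow-hash tcp4" and friends.  Note that the UDP cases require *both*
 * the 4-tuple and the UDP enable bits before claiming L4 hashing, falling
 * back to the 2-tuple (IP-only) report otherwise, whereas TCP needs only
 * the 4-tuple bit.
 */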
2792
2793 static const struct ethtool_ops cxgb_ethtool_ops = {
2794         .get_settings      = get_settings,
2795         .set_settings      = set_settings,
2796         .get_drvinfo       = get_drvinfo,
2797         .get_msglevel      = get_msglevel,
2798         .set_msglevel      = set_msglevel,
2799         .get_ringparam     = get_sge_param,
2800         .set_ringparam     = set_sge_param,
2801         .get_coalesce      = get_coalesce,
2802         .set_coalesce      = set_coalesce,
2803         .get_eeprom_len    = get_eeprom_len,
2804         .get_eeprom        = get_eeprom,
2805         .set_eeprom        = set_eeprom,
2806         .get_pauseparam    = get_pauseparam,
2807         .set_pauseparam    = set_pauseparam,
2808         .get_link          = ethtool_op_get_link,
2809         .get_strings       = get_strings,
2810         .set_phys_id       = identify_port,
2811         .nway_reset        = restart_autoneg,
2812         .get_sset_count    = get_sset_count,
2813         .get_ethtool_stats = get_stats,
2814         .get_regs_len      = get_regs_len,
2815         .get_regs          = get_regs,
2816         .get_wol           = get_wol,
2817         .set_wol           = set_wol,
2818         .get_rxnfc         = get_rxnfc,
2819         .get_rxfh_indir_size = get_rss_table_size,
2820         .get_rxfh_indir    = get_rss_table,
2821         .set_rxfh_indir    = set_rss_table,
2822         .flash_device      = set_flash,
2823 };
2824
2825 /*
2826  * debugfs support
2827  */
2828 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2829                         loff_t *ppos)
2830 {
2831         loff_t pos = *ppos;
2832         loff_t avail = file_inode(file)->i_size;
2833         unsigned int mem = (uintptr_t)file->private_data & 3;
2834         struct adapter *adap = file->private_data - mem;
2835
2836         if (pos < 0)
2837                 return -EINVAL;
2838         if (pos >= avail)
2839                 return 0;
2840         if (count > avail - pos)
2841                 count = avail - pos;
2842
2843         while (count) {
2844                 size_t len;
2845                 int ret, ofst;
2846                 __be32 data[16];
2847
2848                 if ((mem == MEM_MC) || (mem == MEM_MC1))
2849                         ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2850                 else
2851                         ret = t4_edc_read(adap, mem, pos, data, NULL);
2852                 if (ret)
2853                         return ret;
2854
2855                 ofst = pos % sizeof(data);
2856                 len = min(count, sizeof(data) - ofst);
2857                 if (copy_to_user(buf, (u8 *)data + ofst, len))
2858                         return -EFAULT;
2859
2860                 buf += len;
2861                 pos += len;
2862                 count -= len;
2863         }
2864         count = pos - *ppos;
2865         *ppos = pos;
2866         return count;
2867 }
2868
2869 static const struct file_operations mem_debugfs_fops = {
2870         .owner   = THIS_MODULE,
2871         .open    = simple_open,
2872         .read    = mem_read,
2873         .llseek  = default_llseek,
2874 };
2875
2876 static void add_debugfs_mem(struct adapter *adap, const char *name,
2877                             unsigned int idx, unsigned int size_mb)
2878 {
2879         struct dentry *de;
2880
2881         de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2882                                  (void *)adap + idx, &mem_debugfs_fops);
2883         if (de && de->d_inode)
2884                 de->d_inode->i_size = size_mb << 20;
2885 }
2886
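/*
 * Illustrative sketch, not called by the driver: add_debugfs_mem() below
 * tags the debugfs private_data pointer with the memory-type index in its
 * low two bits (the adapter structure is allocated with far stricter
 * alignment, so those bits are otherwise zero), and mem_read() above
 * reverses the tagging.  The helper name and WARN_ON are ours.
 */
static void __maybe_unused example_mem_tagging(struct adapter *adap,
                                               unsigned int idx)
{
        void *tagged = (void *)adap + idx;        /* idx must be < 4 */
        unsigned int mem = (uintptr_t)tagged & 3; /* recover the index */
        struct adapter *orig = tagged - mem;      /* recover the pointer */

        WARN_ON(orig != adap || mem != idx);
}
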
2887 static int setup_debugfs(struct adapter *adap)
2888 {
2889         int i;
2890         u32 size;
2891
2892         if (IS_ERR_OR_NULL(adap->debugfs_root))
2893                 return -1;
2894
2895         i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2896         if (i & EDRAM0_ENABLE) {
2897                 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2898                 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2899         }
2900         if (i & EDRAM1_ENABLE) {
2901                 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2902                 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2903         }
2904         if (is_t4(adap->params.chip)) {
2905                 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2906                 if (i & EXT_MEM_ENABLE)
2907                         add_debugfs_mem(adap, "mc", MEM_MC,
2908                                         EXT_MEM_SIZE_GET(size));
2909         } else {
2910                 if (i & EXT_MEM_ENABLE) {
2911                         size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2912                         add_debugfs_mem(adap, "mc0", MEM_MC0,
2913                                         EXT_MEM_SIZE_GET(size));
2914                 }
2915                 if (i & EXT_MEM1_ENABLE) {
2916                         size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2917                         add_debugfs_mem(adap, "mc1", MEM_MC1,
2918                                         EXT_MEM_SIZE_GET(size));
2919                 }
2920         }
2921         if (adap->l2t)
2922                 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2923                                     &t4_l2t_fops);
2924         return 0;
2925 }
2926
2927 /*
2928  * upper-layer driver support
2929  */
2930
2931 /*
2932  * Allocate an active-open TID and set it to the supplied value.
2933  */
2934 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2935 {
2936         int atid = -1;
2937
2938         spin_lock_bh(&t->atid_lock);
2939         if (t->afree) {
2940                 union aopen_entry *p = t->afree;
2941
2942                 atid = (p - t->atid_tab) + t->atid_base;
2943                 t->afree = p->next;
2944                 p->data = data;
2945                 t->atids_in_use++;
2946         }
2947         spin_unlock_bh(&t->atid_lock);
2948         return atid;
2949 }
2950 EXPORT_SYMBOL(cxgb4_alloc_atid);
2951
2952 /*
2953  * Release an active-open TID.
2954  */
2955 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2956 {
2957         union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
2958
2959         spin_lock_bh(&t->atid_lock);
2960         p->next = t->afree;
2961         t->afree = p;
2962         t->atids_in_use--;
2963         spin_unlock_bh(&t->atid_lock);
2964 }
2965 EXPORT_SYMBOL(cxgb4_free_atid);
2966
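/*
 * Minimal usage sketch for the atid API above (hypothetical ULD code, not
 * part of this driver): allocate an atid for a connection object and
 * release it when the connection is torn down.  cxgb4_alloc_atid()
 * returns -1 when the table is exhausted.
 */
static int __maybe_unused example_atid_usage(struct tid_info *t, void *conn)
{
        int atid = cxgb4_alloc_atid(t, conn);

        if (atid < 0)
                return -ENOMEM;         /* no free atids */
        /* ... use atid in active-open CPL messages ... */
        cxgb4_free_atid(t, atid);
        return 0;
}
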
2967 /*
2968  * Allocate a server TID and set it to the supplied value.
2969  */
2970 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2971 {
2972         int stid;
2973
2974         spin_lock_bh(&t->stid_lock);
2975         if (family == PF_INET) {
2976                 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2977                 if (stid < t->nstids)
2978                         __set_bit(stid, t->stid_bmap);
2979                 else
2980                         stid = -1;
2981         } else {
2982                 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2983                 if (stid < 0)
2984                         stid = -1;
2985         }
2986         if (stid >= 0) {
2987                 t->stid_tab[stid].data = data;
2988                 stid += t->stid_base;
2989                 t->stids_in_use++;
2990         }
2991         spin_unlock_bh(&t->stid_lock);
2992         return stid;
2993 }
2994 EXPORT_SYMBOL(cxgb4_alloc_stid);
2995
2996 /* Allocate a server filter TID and set it to the supplied value.
2997  */
2998 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
2999 {
3000         int stid;
3001
3002         spin_lock_bh(&t->stid_lock);
3003         if (family == PF_INET) {
3004                 stid = find_next_zero_bit(t->stid_bmap,
3005                                 t->nstids + t->nsftids, t->nstids);
3006                 if (stid < (t->nstids + t->nsftids))
3007                         __set_bit(stid, t->stid_bmap);
3008                 else
3009                         stid = -1;
3010         } else {
3011                 stid = -1;
3012         }
3013         if (stid >= 0) {
3014                 t->stid_tab[stid].data = data;
3015                 stid += t->stid_base;
3016                 t->stids_in_use++;
3017         }
3018         spin_unlock_bh(&t->stid_lock);
3019         return stid;
3020 }
3021 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3022
3023 /* Release a server TID.
3024  */
3025 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3026 {
3027         stid -= t->stid_base;
3028         spin_lock_bh(&t->stid_lock);
3029         if (family == PF_INET)
3030                 __clear_bit(stid, t->stid_bmap);
3031         else
3032                 bitmap_release_region(t->stid_bmap, stid, 2);
3033         t->stid_tab[stid].data = NULL;
3034         t->stids_in_use--;
3035         spin_unlock_bh(&t->stid_lock);
3036 }
3037 EXPORT_SYMBOL(cxgb4_free_stid);
3038
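/*
 * Usage sketch (hypothetical caller, not part of this driver): a PF_INET6
 * stid consumes a naturally aligned region of four bitmap entries
 * (bitmap_find_free_region(..., 2) above), so an stid must be freed with
 * the same address family it was allocated with.
 */
static int __maybe_unused example_stid_usage(struct tid_info *t, void *srv)
{
        int stid = cxgb4_alloc_stid(t, PF_INET6, srv);

        if (stid < 0)
                return -ENOMEM;
        /* ... bind a hardware listening server to this stid ... */
        cxgb4_free_stid(t, stid, PF_INET6);
        return 0;
}
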
3039 /*
3040  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
3041  */
3042 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3043                            unsigned int tid)
3044 {
3045         struct cpl_tid_release *req;
3046
3047         set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3048         req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3049         INIT_TP_WR(req, tid);
3050         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3051 }
3052
3053 /*
3054  * Queue a TID release request and if necessary schedule a work queue to
3055  * process it.
3056  */
3057 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3058                                     unsigned int tid)
3059 {
3060         void **p = &t->tid_tab[tid];
3061         struct adapter *adap = container_of(t, struct adapter, tids);
3062
3063         spin_lock_bh(&adap->tid_release_lock);
3064         *p = adap->tid_release_head;
3065         /* Low 2 bits encode the Tx channel number */
3066         adap->tid_release_head = (void **)((uintptr_t)p | chan);
3067         if (!adap->tid_release_task_busy) {
3068                 adap->tid_release_task_busy = true;
3069                 queue_work(workq, &adap->tid_release_task);
3070         }
3071         spin_unlock_bh(&adap->tid_release_lock);
3072 }
3073
3074 /*
3075  * Process the list of pending TID release requests.
3076  */
3077 static void process_tid_release_list(struct work_struct *work)
3078 {
3079         struct sk_buff *skb;
3080         struct adapter *adap;
3081
3082         adap = container_of(work, struct adapter, tid_release_task);
3083
3084         spin_lock_bh(&adap->tid_release_lock);
3085         while (adap->tid_release_head) {
3086                 void **p = adap->tid_release_head;
3087                 unsigned int chan = (uintptr_t)p & 3;
3088                 p = (void *)p - chan;
3089
3090                 adap->tid_release_head = *p;
3091                 *p = NULL;
3092                 spin_unlock_bh(&adap->tid_release_lock);
3093
3094                 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3095                                          GFP_KERNEL)))
3096                         schedule_timeout_uninterruptible(1);
3097
3098                 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3099                 t4_ofld_send(adap, skb);
3100                 spin_lock_bh(&adap->tid_release_lock);
3101         }
3102         adap->tid_release_task_busy = false;
3103         spin_unlock_bh(&adap->tid_release_lock);
3104 }
3105
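/*
 * Sketch of the encoding used by the deferred-release list above, for
 * illustration only: each list node is the address of a tid_tab[] slot,
 * and since those slots are pointer-aligned the low two bits are free to
 * carry the Tx channel.  The tid itself falls out of the slot's position
 * within tid_tab[].  The helper below is hypothetical.
 */
static unsigned int __maybe_unused
example_decode_release_node(struct tid_info *t, void **node,
                            unsigned int *chan)
{
        *chan = (uintptr_t)node & 3;    /* low bits: Tx channel */
        node = (void *)node - *chan;    /* strip the tag */
        return node - t->tid_tab;       /* slot index == tid */
}
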
3106 /*
3107  * Release a TID and inform HW.  If we are unable to allocate the release
3108  * message we defer to a work queue.
3109  */
3110 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3111 {
3112         void *old;
3113         struct sk_buff *skb;
3114         struct adapter *adap = container_of(t, struct adapter, tids);
3115
3116         old = t->tid_tab[tid];
3117         skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3118         if (likely(skb)) {
3119                 t->tid_tab[tid] = NULL;
3120                 mk_tid_release(skb, chan, tid);
3121                 t4_ofld_send(adap, skb);
3122         } else
3123                 cxgb4_queue_tid_release(t, chan, tid);
3124         if (old)
3125                 atomic_dec(&t->tids_in_use);
3126 }
3127 EXPORT_SYMBOL(cxgb4_remove_tid);
3128
3129 /*
3130  * Allocate and initialize the TID tables.  Returns 0 on success.
3131  */
3132 static int tid_init(struct tid_info *t)
3133 {
3134         size_t size;
3135         unsigned int stid_bmap_size;
3136         unsigned int natids = t->natids;
3137
3138         stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3139         size = t->ntids * sizeof(*t->tid_tab) +
3140                natids * sizeof(*t->atid_tab) +
3141                t->nstids * sizeof(*t->stid_tab) +
3142                t->nsftids * sizeof(*t->stid_tab) +
3143                stid_bmap_size * sizeof(long) +
3144                t->nftids * sizeof(*t->ftid_tab) +
3145                t->nsftids * sizeof(*t->ftid_tab);
3146
3147         t->tid_tab = t4_alloc_mem(size);
3148         if (!t->tid_tab)
3149                 return -ENOMEM;
3150
3151         t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3152         t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3153         t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3154         t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3155         spin_lock_init(&t->stid_lock);
3156         spin_lock_init(&t->atid_lock);
3157
3158         t->stids_in_use = 0;
3159         t->afree = NULL;
3160         t->atids_in_use = 0;
3161         atomic_set(&t->tids_in_use, 0);
3162
3163         /* Setup the free list for atid_tab and clear the stid bitmap. */
3164         if (natids) {
3165                 while (--natids)
3166                         t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3167                 t->afree = t->atid_tab;
3168         }
3169         bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3170         return 0;
3171 }
3172
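/*
 * Layout sketch of the single allocation carved up by tid_init() above:
 *
 *      tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids] |
 *      stid_bmap[BITS_TO_LONGS(nstids + nsftids)] |
 *      ftid_tab[nftids + nsftids]
 *
 * One t4_alloc_mem() call backs all five tables, so freeing tid_tab
 * releases everything.
 */
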
3173 static int cxgb4_clip_get(const struct net_device *dev,
3174                           const struct in6_addr *lip)
3175 {
3176         struct adapter *adap;
3177         struct fw_clip_cmd c;
3178
3179         adap = netdev2adap(dev);
3180         memset(&c, 0, sizeof(c));
3181         c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3182                         FW_CMD_REQUEST | FW_CMD_WRITE);
3183         c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3184         *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3185         *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3186         return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3187 }
3188
3189 static int cxgb4_clip_release(const struct net_device *dev,
3190                               const struct in6_addr *lip)
3191 {
3192         struct adapter *adap;
3193         struct fw_clip_cmd c;
3194
3195         adap = netdev2adap(dev);
3196         memset(&c, 0, sizeof(c));
3197         c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3198                         FW_CMD_REQUEST | FW_CMD_READ);
3199         c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3200         *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
3201         *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3202         return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3203 }
3204
3205 /**
3206  *      cxgb4_create_server - create an IP server
3207  *      @dev: the device
3208  *      @stid: the server TID
3209  *      @sip: local IP address to bind server to
3210  *      @sport: the server's TCP port
 *      @vlan: the VLAN header information (currently unused here)
3211  *      @queue: queue to direct messages from this server to
3212  *
3213  *      Create an IP server for the given port and address.
3214  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
3215  */
3216 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3217                         __be32 sip, __be16 sport, __be16 vlan,
3218                         unsigned int queue)
3219 {
3220         unsigned int chan;
3221         struct sk_buff *skb;
3222         struct adapter *adap;
3223         struct cpl_pass_open_req *req;
3224         int ret;
3225
3226         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3227         if (!skb)
3228                 return -ENOMEM;
3229
3230         adap = netdev2adap(dev);
3231         req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3232         INIT_TP_WR(req, 0);
3233         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3234         req->local_port = sport;
3235         req->peer_port = htons(0);
3236         req->local_ip = sip;
3237         req->peer_ip = htonl(0);
3238         chan = rxq_to_chan(&adap->sge, queue);
3239         req->opt0 = cpu_to_be64(TX_CHAN(chan));
3240         req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3241                                 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3242         ret = t4_mgmt_tx(adap, skb);
3243         return net_xmit_eval(ret);
3244 }
3245 EXPORT_SYMBOL(cxgb4_create_server);
3246
3247 /**
 *      cxgb4_create_server6 - create an IPv6 server
3248  *      @dev: the device
3249  *      @stid: the server TID
3250  *      @sip: local IPv6 address to bind server to
3251  *      @sport: the server's TCP port
3252  *      @queue: queue to direct messages from this server to
3253  *
3254  *      Create an IPv6 server for the given port and address.
3255  *      Returns <0 on error and one of the %NET_XMIT_* values on success.
3256  */
3257 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3258                          const struct in6_addr *sip, __be16 sport,
3259                          unsigned int queue)
3260 {
3261         unsigned int chan;
3262         struct sk_buff *skb;
3263         struct adapter *adap;
3264         struct cpl_pass_open_req6 *req;
3265         int ret;
3266
3267         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3268         if (!skb)
3269                 return -ENOMEM;
3270
3271         adap = netdev2adap(dev);
3272         req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3273         INIT_TP_WR(req, 0);
3274         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3275         req->local_port = sport;
3276         req->peer_port = htons(0);
3277         req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3278         req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3279         req->peer_ip_hi = cpu_to_be64(0);
3280         req->peer_ip_lo = cpu_to_be64(0);
3281         chan = rxq_to_chan(&adap->sge, queue);
3282         req->opt0 = cpu_to_be64(TX_CHAN(chan));
3283         req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3284                                 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3285         ret = t4_mgmt_tx(adap, skb);
3286         return net_xmit_eval(ret);
3287 }
3288 EXPORT_SYMBOL(cxgb4_create_server6);
3289
3290 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3291                         unsigned int queue, bool ipv6)
3292 {
3293         struct sk_buff *skb;
3294         struct adapter *adap;
3295         struct cpl_close_listsvr_req *req;
3296         int ret;
3297
3298         adap = netdev2adap(dev);
3299
3300         skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3301         if (!skb)
3302                 return -ENOMEM;
3303
3304         req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3305         INIT_TP_WR(req, 0);
3306         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3307         req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3308                                 LISTSVR_IPV6(0)) | QUEUENO(queue));
3309         ret = t4_mgmt_tx(adap, skb);
3310         return net_xmit_eval(ret);
3311 }
3312 EXPORT_SYMBOL(cxgb4_remove_server);
3313
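/*
 * Lifecycle sketch for the server API above (hypothetical ULD code; the
 * port and queue values are placeholders): allocate an stid, create the
 * hardware listener, and tear both down in reverse order.
 */
static int __maybe_unused example_server_lifecycle(struct net_device *dev,
                                                   struct tid_info *t,
                                                   void *srv_data)
{
        int stid = cxgb4_alloc_stid(t, PF_INET, srv_data);
        int ret;

        if (stid < 0)
                return -ENOMEM;

        /* listen on 0.0.0.0:8000, steering SYNs to rx queue 0 */
        ret = cxgb4_create_server(dev, stid, htonl(0), htons(8000), 0, 0);
        if (ret < 0)
                goto free_stid;
        /* ... server runs; later: ... */
        ret = cxgb4_remove_server(dev, stid, 0, false);
free_stid:
        cxgb4_free_stid(t, stid, PF_INET);
        return ret;
}
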
3314 /**
3315  *      cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3316  *      @mtus: the HW MTU table
3317  *      @mtu: the target MTU
3318  *      @idx: index of selected entry in the MTU table
3319  *
3320  *      Returns the index and the value in the HW MTU table that is closest to
3321  *      but does not exceed @mtu, unless @mtu is smaller than any value in the
3322  *      table, in which case that smallest available value is selected.
3323  */
3324 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3325                             unsigned int *idx)
3326 {
3327         unsigned int i = 0;
3328
3329         while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3330                 ++i;
3331         if (idx)
3332                 *idx = i;
3333         return mtus[i];
3334 }
3335 EXPORT_SYMBOL(cxgb4_best_mtu);
3336
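/*
 * Worked example for cxgb4_best_mtu() with made-up table contents (a real
 * table is ascending and fully populated): given { 576, 1500, 9000, ... },
 * a request for 1400 rounds down to 576 at index 0, since 1500 already
 * exceeds the request; a request for 1500 selects 1500 at index 1.
 */
static unsigned int __maybe_unused example_best_mtu(const unsigned short *mtus)
{
        unsigned int idx;
        unsigned int mtu = cxgb4_best_mtu(mtus, 1400, &idx);

        WARN_ON(mtus[idx] != mtu);      /* *idx always matches the result */
        return mtu;
}
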
3337 /**
3338  *      cxgb4_port_chan - get the HW channel of a port
3339  *      @dev: the net device for the port
3340  *
3341  *      Return the HW Tx channel of the given port.
3342  */
3343 unsigned int cxgb4_port_chan(const struct net_device *dev)
3344 {
3345         return netdev2pinfo(dev)->tx_chan;
3346 }
3347 EXPORT_SYMBOL(cxgb4_port_chan);
3348
3349 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3350 {
3351         struct adapter *adap = netdev2adap(dev);
3352         u32 v1, v2, lp_count, hp_count;
3353
3354         v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3355         v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3356         if (is_t4(adap->params.chip)) {
3357                 lp_count = G_LP_COUNT(v1);
3358                 hp_count = G_HP_COUNT(v1);
3359         } else {
3360                 lp_count = G_LP_COUNT_T5(v1);
3361                 hp_count = G_HP_COUNT_T5(v2);
3362         }
3363         return lpfifo ? lp_count : hp_count;
3364 }
3365 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3366
3367 /**
3368  *      cxgb4_port_viid - get the VI id of a port
3369  *      @dev: the net device for the port
3370  *
3371  *      Return the VI id of the given port.
3372  */
3373 unsigned int cxgb4_port_viid(const struct net_device *dev)
3374 {
3375         return netdev2pinfo(dev)->viid;
3376 }
3377 EXPORT_SYMBOL(cxgb4_port_viid);
3378
3379 /**
3380  *      cxgb4_port_idx - get the index of a port
3381  *      @dev: the net device for the port
3382  *
3383  *      Return the index of the given port.
3384  */
3385 unsigned int cxgb4_port_idx(const struct net_device *dev)
3386 {
3387         return netdev2pinfo(dev)->port_id;
3388 }
3389 EXPORT_SYMBOL(cxgb4_port_idx);
3390
3391 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3392                          struct tp_tcp_stats *v6)
3393 {
3394         struct adapter *adap = pci_get_drvdata(pdev);
3395
3396         spin_lock(&adap->stats_lock);
3397         t4_tp_get_tcp_stats(adap, v4, v6);
3398         spin_unlock(&adap->stats_lock);
3399 }
3400 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3401
3402 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3403                       const unsigned int *pgsz_order)
3404 {
3405         struct adapter *adap = netdev2adap(dev);
3406
3407         t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3408         t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3409                      HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3410                      HPZ3(pgsz_order[3]));
3411 }
3412 EXPORT_SYMBOL(cxgb4_iscsi_init);
3413
3414 int cxgb4_flush_eq_cache(struct net_device *dev)
3415 {
3416         struct adapter *adap = netdev2adap(dev);
3417         int ret;
3418
3419         ret = t4_fwaddrspace_write(adap, adap->mbox,
3420                                    0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3421         return ret;
3422 }
3423 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3424
3425 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3426 {
3427         u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3428         __be64 indices;
3429         int ret;
3430
3431         ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
3432         if (!ret) {
3433                 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3434                 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3435         }
3436         return ret;
3437 }
3438
3439 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3440                         u16 size)
3441 {
3442         struct adapter *adap = netdev2adap(dev);
3443         u16 hw_pidx, hw_cidx;
3444         int ret;
3445
3446         ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3447         if (ret)
3448                 goto out;
3449
3450         if (pidx != hw_pidx) {
3451                 u16 delta;
3452
3453                 if (pidx >= hw_pidx)
3454                         delta = pidx - hw_pidx;
3455                 else
3456                         delta = size - hw_pidx + pidx;
3457                 wmb();
3458                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3459                              QID(qid) | PIDX(delta));
3460         }
3461 out:
3462         return ret;
3463 }
3464 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
3465
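/*
 * The pidx delta above must account for ring wrap-around: when the
 * software producer index has wrapped past the hardware's copy, the
 * distance is measured through the end of the ring.  Worked example with
 * made-up numbers:
 *
 *      size = 1024, hw_pidx = 1020, pidx = 4
 *      delta = size - hw_pidx + pidx = 1024 - 1020 + 4 = 8
 *
 * i.e. eight descriptors were posted while the doorbell was out of sync.
 */
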
3466 void cxgb4_disable_db_coalescing(struct net_device *dev)
3467 {
3468         struct adapter *adap;
3469
3470         adap = netdev2adap(dev);
3471         t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3472                          F_NOCOALESCE);
3473 }
3474 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3475
3476 void cxgb4_enable_db_coalescing(struct net_device *dev)
3477 {
3478         struct adapter *adap;
3479
3480         adap = netdev2adap(dev);
3481         t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3482 }
3483 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3484
3485 static struct pci_driver cxgb4_driver;
3486
3487 static void check_neigh_update(struct neighbour *neigh)
3488 {
3489         const struct device *parent;
3490         const struct net_device *netdev = neigh->dev;
3491
3492         if (netdev->priv_flags & IFF_802_1Q_VLAN)
3493                 netdev = vlan_dev_real_dev(netdev);
3494         parent = netdev->dev.parent;
3495         if (parent && parent->driver == &cxgb4_driver.driver)
3496                 t4_l2t_update(dev_get_drvdata(parent), neigh);
3497 }
3498
3499 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3500                        void *data)
3501 {
3502         switch (event) {
3503         case NETEVENT_NEIGH_UPDATE:
3504                 check_neigh_update(data);
3505                 break;
3506         case NETEVENT_REDIRECT:
3507         default:
3508                 break;
3509         }
3510         return 0;
3511 }
3512
3513 static bool netevent_registered;
3514 static struct notifier_block cxgb4_netevent_nb = {
3515         .notifier_call = netevent_cb
3516 };
3517
3518 static void drain_db_fifo(struct adapter *adap, int usecs)
3519 {
3520         u32 v1, v2, lp_count, hp_count;
3521
3522         do {
3523                 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3524                 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3525                 if (is_t4(adap->params.chip)) {
3526                         lp_count = G_LP_COUNT(v1);
3527                         hp_count = G_HP_COUNT(v1);
3528                 } else {
3529                         lp_count = G_LP_COUNT_T5(v1);
3530                         hp_count = G_HP_COUNT_T5(v2);
3531                 }
3532
3533                 if (lp_count == 0 && hp_count == 0)
3534                         break;
3535                 set_current_state(TASK_UNINTERRUPTIBLE);
3536                 schedule_timeout(usecs_to_jiffies(usecs));
3537         } while (1);
3538 }
3539
3540 static void disable_txq_db(struct sge_txq *q)
3541 {
3542         spin_lock_irq(&q->db_lock);
3543         q->db_disabled = 1;
3544         spin_unlock_irq(&q->db_lock);
3545 }
3546
3547 static void enable_txq_db(struct sge_txq *q)
3548 {
3549         spin_lock_irq(&q->db_lock);
3550         q->db_disabled = 0;
3551         spin_unlock_irq(&q->db_lock);
3552 }
3553
3554 static void disable_dbs(struct adapter *adap)
3555 {
3556         int i;
3557
3558         for_each_ethrxq(&adap->sge, i)
3559                 disable_txq_db(&adap->sge.ethtxq[i].q);
3560         for_each_ofldrxq(&adap->sge, i)
3561                 disable_txq_db(&adap->sge.ofldtxq[i].q);
3562         for_each_port(adap, i)
3563                 disable_txq_db(&adap->sge.ctrlq[i].q);
3564 }
3565
3566 static void enable_dbs(struct adapter *adap)
3567 {
3568         int i;
3569
3570         for_each_ethrxq(&adap->sge, i)
3571                 enable_txq_db(&adap->sge.ethtxq[i].q);
3572         for_each_ofldrxq(&adap->sge, i)
3573                 enable_txq_db(&adap->sge.ofldtxq[i].q);
3574         for_each_port(adap, i)
3575                 enable_txq_db(&adap->sge.ctrlq[i].q);
3576 }
3577
3578 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
3579 {
3580         u16 hw_pidx, hw_cidx;
3581         int ret;
3582
3583         spin_lock_bh(&q->db_lock);
3584         ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
3585         if (ret)
3586                 goto out;
3587         if (q->db_pidx != hw_pidx) {
3588                 u16 delta;
3589
3590                 if (q->db_pidx >= hw_pidx)
3591                         delta = q->db_pidx - hw_pidx;
3592                 else
3593                         delta = q->size - hw_pidx + q->db_pidx;
3594                 wmb();
3595                 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3596                              QID(q->cntxt_id) | PIDX(delta));
3597         }
3598 out:
3599         q->db_disabled = 0;
3600         spin_unlock_bh(&q->db_lock);
3601         if (ret)
3602                 CH_WARN(adap, "DB drop recovery failed.\n");
3603 }
3604 static void recover_all_queues(struct adapter *adap)
3605 {
3606         int i;
3607
3608         for_each_ethrxq(&adap->sge, i)
3609                 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
3610         for_each_ofldrxq(&adap->sge, i)
3611                 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
3612         for_each_port(adap, i)
3613                 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
3614 }
3615
3616 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
3617 {
3618         mutex_lock(&uld_mutex);
3619         if (adap->uld_handle[CXGB4_ULD_RDMA])
3620                 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
3621                                 cmd);
3622         mutex_unlock(&uld_mutex);
3623 }
3624
3625 static void process_db_full(struct work_struct *work)
3626 {
3627         struct adapter *adap;
3628
3629         adap = container_of(work, struct adapter, db_full_task);
3630
3631         notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
3632         drain_db_fifo(adap, dbfifo_drain_delay);
3633         t4_set_reg_field(adap, SGE_INT_ENABLE3,
3634                          DBFIFO_HP_INT | DBFIFO_LP_INT,
3635                          DBFIFO_HP_INT | DBFIFO_LP_INT);
3636         notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
3637 }
3638
3639 static void process_db_drop(struct work_struct *work)
3640 {
3641         struct adapter *adap;
3642
3643         adap = container_of(work, struct adapter, db_drop_task);
3644
3645         if (is_t4(adap->params.chip)) {
3646                 disable_dbs(adap);
3647                 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3648                 drain_db_fifo(adap, 1);
3649                 recover_all_queues(adap);
3650                 enable_dbs(adap);
3651         } else {
3652                 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3653                 u16 qid = (dropped_db >> 15) & 0x1ffff;
3654                 u16 pidx_inc = dropped_db & 0x1fff;
3655                 unsigned int s_qpp;
3656                 unsigned short udb_density;
3657                 unsigned long qpshift;
3658                 int page;
3659                 u32 udb;
3660
3661                 dev_warn(adap->pdev_dev,
3662                          "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3663                          dropped_db, qid,
3664                          (dropped_db >> 14) & 1,
3665                          (dropped_db >> 13) & 1,
3666                          pidx_inc);
3667
3668                 drain_db_fifo(adap, 1);
3669
3670                 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3671                 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3672                                 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3673                 qpshift = PAGE_SHIFT - ilog2(udb_density);
3674                 udb = qid << qpshift;
3675                 udb &= PAGE_MASK;
3676                 page = udb / PAGE_SIZE;
3677                 udb += (qid - (page * udb_density)) * 128;
3678
3679                 writel(PIDX(pidx_inc),  adap->bar2 + udb + 8);
3680
3681                 /* Re-enable BAR2 WC */
3682                 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3683         }
3684
3685         t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3686 }
3687
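/*
 * Worked example of the T5 doorbell address math above, with made-up
 * values: 4KB pages and udb_density = 16 queues/page give
 * qpshift = 12 - 4 = 8.  For qid = 35, udb = (35 << 8) & PAGE_MASK =
 * 0x2000 (page 2), and the in-page slot is (35 - 2 * 16) * 128 = 0x180,
 * so the queue's doorbell lives at BAR2 offset 0x2180 (+8 for the kick
 * register written above).
 */
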
3688 void t4_db_full(struct adapter *adap)
3689 {
3690         if (is_t4(adap->params.chip)) {
3691                 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3692                                  DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3693                 queue_work(workq, &adap->db_full_task);
3694         }
3695 }
3696
3697 void t4_db_dropped(struct adapter *adap)
3698 {
3699         if (is_t4(adap->params.chip))
3700                 queue_work(workq, &adap->db_drop_task);
3701 }
3702
3703 static void uld_attach(struct adapter *adap, unsigned int uld)
3704 {
3705         void *handle;
3706         struct cxgb4_lld_info lli;
3707         unsigned short i;
3708
3709         lli.pdev = adap->pdev;
3710         lli.l2t = adap->l2t;
3711         lli.tids = &adap->tids;
3712         lli.ports = adap->port;
3713         lli.vr = &adap->vres;
3714         lli.mtus = adap->params.mtus;
3715         if (uld == CXGB4_ULD_RDMA) {
3716                 lli.rxq_ids = adap->sge.rdma_rxq;
3717                 lli.nrxq = adap->sge.rdmaqs;
3718         } else if (uld == CXGB4_ULD_ISCSI) {
3719                 lli.rxq_ids = adap->sge.ofld_rxq;
3720                 lli.nrxq = adap->sge.ofldqsets;
3721         }
3722         lli.ntxq = adap->sge.ofldqsets;
3723         lli.nchan = adap->params.nports;
3724         lli.nports = adap->params.nports;
3725         lli.wr_cred = adap->params.ofldq_wr_cred;
3726         lli.adapter_type = adap->params.chip;
3727         lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
3728         lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
3729                         t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
3730                         (adap->fn * 4));
3731         lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
3732                         t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
3733                         (adap->fn * 4));
3734         lli.filt_mode = adap->filter_mode;
3735         /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
3736         for (i = 0; i < NCHAN; i++)
3737                 lli.tx_modq[i] = i;
3738         lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
3739         lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
3740         lli.fw_vers = adap->params.fw_vers;
3741         lli.dbfifo_int_thresh = dbfifo_int_thresh;
3742         lli.sge_pktshift = adap->sge.pktshift;
3743         lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
3744
3745         handle = ulds[uld].add(&lli);
3746         if (IS_ERR(handle)) {
3747                 dev_warn(adap->pdev_dev,
3748                          "could not attach to the %s driver, error %ld\n",
3749                          uld_str[uld], PTR_ERR(handle));
3750                 return;
3751         }
3752
3753         adap->uld_handle[uld] = handle;
3754
3755         if (!netevent_registered) {
3756                 register_netevent_notifier(&cxgb4_netevent_nb);
3757                 netevent_registered = true;
3758         }
3759
3760         if (adap->flags & FULL_INIT_DONE)
3761                 ulds[uld].state_change(handle, CXGB4_STATE_UP);
3762 }
3763
3764 static void attach_ulds(struct adapter *adap)
3765 {
3766         unsigned int i;
3767
3768         spin_lock(&adap_rcu_lock);
3769         list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
3770         spin_unlock(&adap_rcu_lock);
3771
3772         mutex_lock(&uld_mutex);
3773         list_add_tail(&adap->list_node, &adapter_list);
3774         for (i = 0; i < CXGB4_ULD_MAX; i++)
3775                 if (ulds[i].add)
3776                         uld_attach(adap, i);
3777         mutex_unlock(&uld_mutex);
3778 }
3779
3780 static void detach_ulds(struct adapter *adap)
3781 {
3782         unsigned int i;
3783
3784         mutex_lock(&uld_mutex);
3785         list_del(&adap->list_node);
3786         for (i = 0; i < CXGB4_ULD_MAX; i++)
3787                 if (adap->uld_handle[i]) {
3788                         ulds[i].state_change(adap->uld_handle[i],
3789                                              CXGB4_STATE_DETACH);
3790                         adap->uld_handle[i] = NULL;
3791                 }
3792         if (netevent_registered && list_empty(&adapter_list)) {
3793                 unregister_netevent_notifier(&cxgb4_netevent_nb);
3794                 netevent_registered = false;
3795         }
3796         mutex_unlock(&uld_mutex);
3797
3798         spin_lock(&adap_rcu_lock);
3799         list_del_rcu(&adap->rcu_node);
3800         spin_unlock(&adap_rcu_lock);
3801 }
3802
3803 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
3804 {
3805         unsigned int i;
3806
3807         mutex_lock(&uld_mutex);
3808         for (i = 0; i < CXGB4_ULD_MAX; i++)
3809                 if (adap->uld_handle[i])
3810                         ulds[i].state_change(adap->uld_handle[i], new_state);
3811         mutex_unlock(&uld_mutex);
3812 }
3813
3814 /**
3815  *      cxgb4_register_uld - register an upper-layer driver
3816  *      @type: the ULD type
3817  *      @p: the ULD methods
3818  *
3819  *      Registers an upper-layer driver with this driver and notifies the ULD
3820  *      about any presently available devices that support its type.  Returns
3821  *      %-EBUSY if a ULD of the same type is already registered.
3822  */
3823 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
3824 {
3825         int ret = 0;
3826         struct adapter *adap;
3827
3828         if (type >= CXGB4_ULD_MAX)
3829                 return -EINVAL;
3830         mutex_lock(&uld_mutex);
3831         if (ulds[type].add) {
3832                 ret = -EBUSY;
3833                 goto out;
3834         }
3835         ulds[type] = *p;
3836         list_for_each_entry(adap, &adapter_list, list_node)
3837                 uld_attach(adap, type);
3838 out:    mutex_unlock(&uld_mutex);
3839         return ret;
3840 }
3841 EXPORT_SYMBOL(cxgb4_register_uld);
3842
3843 /**
3844  *      cxgb4_unregister_uld - unregister an upper-layer driver
3845  *      @type: the ULD type
3846  *
3847  *      Unregisters an existing upper-layer driver.
3848  */
3849 int cxgb4_unregister_uld(enum cxgb4_uld type)
3850 {
3851         struct adapter *adap;
3852
3853         if (type >= CXGB4_ULD_MAX)
3854                 return -EINVAL;
3855         mutex_lock(&uld_mutex);
3856         list_for_each_entry(adap, &adapter_list, list_node)
3857                 adap->uld_handle[type] = NULL;
3858         ulds[type].add = NULL;
3859         mutex_unlock(&uld_mutex);
3860         return 0;
3861 }
3862 EXPORT_SYMBOL(cxgb4_unregister_uld);
3863
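/*
 * Registration sketch for an upper-layer driver (hypothetical module, not
 * part of this file): supply the callbacks and register under one of the
 * CXGB4_ULD_* types.  cxgb4_register_uld() immediately calls ->add() for
 * every adapter that has already been probed.
 */
static void *example_uld_add(const struct cxgb4_lld_info *lld)
{
        /* allocate and return per-adapter ULD state, or an ERR_PTR() */
        return ERR_PTR(-ENOMEM);
}

static struct cxgb4_uld_info example_uld_info __maybe_unused = {
        .name = "example",
        .add  = example_uld_add,
        /* .rx_handler, .state_change, .control as the ULD requires */
};
/* a module init would then do: cxgb4_register_uld(type, &example_uld_info); */
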
3864 /* Check whether the netdev on which the event occurred belongs to us.
3865  * Returns success (1) if it does, failure (0) otherwise.
3866  */
3867 static int cxgb4_netdev(struct net_device *netdev)
3868 {
3869         struct adapter *adap;
3870         int i;
3871
3872         spin_lock(&adap_rcu_lock);
3873         list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
3874                 for (i = 0; i < MAX_NPORTS; i++)
3875                         if (adap->port[i] == netdev) {
3876                                 spin_unlock(&adap_rcu_lock);
3877                                 return 1;
3878                         }
3879         spin_unlock(&adap_rcu_lock);
3880         return 0;
3881 }
3882
3883 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
3884                     unsigned long event)
3885 {
3886         int ret = NOTIFY_DONE;
3887
3888         rcu_read_lock();
3889         if (cxgb4_netdev(event_dev)) {
3890                 switch (event) {
3891                 case NETDEV_UP:
3892                         ret = cxgb4_clip_get(event_dev,
3893                                 (const struct in6_addr *)ifa->addr.s6_addr);
3894                         if (ret < 0) {
3895                                 rcu_read_unlock();
3896                                 return ret;
3897                         }
3898                         ret = NOTIFY_OK;
3899                         break;
3900                 case NETDEV_DOWN:
3901                         cxgb4_clip_release(event_dev,
3902                                 (const struct in6_addr *)ifa->addr.s6_addr);
3903                         ret = NOTIFY_OK;
3904                         break;
3905                 default:
3906                         break;
3907                 }
3908         }
3909         rcu_read_unlock();
3910         return ret;
3911 }
3912
3913 static int cxgb4_inet6addr_handler(struct notifier_block *this,
3914                 unsigned long event, void *data)
3915 {
3916         struct inet6_ifaddr *ifa = data;
3917         struct net_device *event_dev;
3918         int ret = NOTIFY_DONE;
3919         struct bonding *bond = netdev_priv(ifa->idev->dev);
3920         struct list_head *iter;
3921         struct slave *slave;
3922         struct pci_dev *first_pdev = NULL;
3923
3924         if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
3925                 event_dev = vlan_dev_real_dev(ifa->idev->dev);
3926                 ret = clip_add(event_dev, ifa, event);
3927         } else if (ifa->idev->dev->flags & IFF_MASTER) {
3928                 /* Slaves of one bond may belong to different adapters.
3929                  * Find each distinct adapter among the slaves and add
3930                  * the CLIP entry to it exactly once.
3931                  */
3932                 read_lock(&bond->lock);
3933                 bond_for_each_slave(bond, slave, iter) {
3934                         if (!first_pdev) {
3935                                 ret = clip_add(slave->dev, ifa, event);
3936                                 /* Initialize first_pdev only if clip_add
3937                                  * succeeded: the slave is ours.
3938                                  */
3939                                 if (ret == NOTIFY_OK)
3940                                         first_pdev = to_pci_dev(
3941                                                         slave->dev->dev.parent);
3942                         } else if (first_pdev !=
3943                                    to_pci_dev(slave->dev->dev.parent))
3944                                         ret = clip_add(slave->dev, ifa, event);
3945                 }
3946                 read_unlock(&bond->lock);
3947         } else
3948                 ret = clip_add(ifa->idev->dev, ifa, event);
3949
3950         return ret;
3951 }
3952
3953 static struct notifier_block cxgb4_inet6addr_notifier = {
3954         .notifier_call = cxgb4_inet6addr_handler
3955 };
3956
3957 /* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
3958  * a physical device.
3959  * The physical device reference is needed to send the actual CLIP command.
3960  */
3961 static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
3962 {
3963         struct inet6_dev *idev = NULL;
3964         struct inet6_ifaddr *ifa;
3965         int ret = 0;
3966
3967         idev = __in6_dev_get(root_dev);
3968         if (!idev)
3969                 return ret;
3970
3971         read_lock_bh(&idev->lock);
3972         list_for_each_entry(ifa, &idev->addr_list, if_list) {
3973                 ret = cxgb4_clip_get(dev,
3974                                 (const struct in6_addr *)ifa->addr.s6_addr);
3975                 if (ret < 0)
3976                         break;
3977         }
3978         read_unlock_bh(&idev->lock);
3979
3980         return ret;
3981 }
3982
3983 static int update_root_dev_clip(struct net_device *dev)
3984 {
3985         struct net_device *root_dev = NULL;
3986         int i, ret = 0;
3987
3988         /* First populate the real net device's IPv6 addresses */
3989         ret = update_dev_clip(dev, dev);
3990         if (ret)
3991                 return ret;
3992
3993         /* Parse all bond and vlan devices layered on top of the physical dev */
3994         for (i = 0; i < VLAN_N_VID; i++) {
3995                 root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
3996                 if (!root_dev)
3997                         continue;
3998
3999                 ret = update_dev_clip(root_dev, dev);
4000                 if (ret)
4001                         break;
4002         }
4003         return ret;
4004 }
4005
4006 static void update_clip(const struct adapter *adap)
4007 {
4008         int i;
4009         struct net_device *dev;
4010         int ret;
4011
4012         rcu_read_lock();
4013
4014         for (i = 0; i < MAX_NPORTS; i++) {
4015                 dev = adap->port[i];
4016                 ret = 0;
4017
4018                 if (dev)
4019                         ret = update_root_dev_clip(dev);
4020
4021                 if (ret < 0)
4022                         break;
4023         }
4024         rcu_read_unlock();
4025 }
4026
4027 /**
4028  *      cxgb_up - enable the adapter
4029  *      @adap: adapter being enabled
4030  *
4031  *      Called when the first port is enabled, this function performs the
4032  *      actions necessary to make an adapter operational, such as completing
4033  *      the initialization of HW modules, and enabling interrupts.
4034  *
4035  *      Must be called with the rtnl lock held.
4036  */
4037 static int cxgb_up(struct adapter *adap)
4038 {
4039         int err;
4040
4041         err = setup_sge_queues(adap);
4042         if (err)
4043                 goto out;
4044         err = setup_rss(adap);
4045         if (err)
4046                 goto freeq;
4047
4048         if (adap->flags & USING_MSIX) {
4049                 name_msix_vecs(adap);
4050                 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
4051                                   adap->msix_info[0].desc, adap);
4052                 if (err)
4053                         goto irq_err;
4054
4055                 err = request_msix_queue_irqs(adap);
4056                 if (err) {
4057                         free_irq(adap->msix_info[0].vec, adap);
4058                         goto irq_err;
4059                 }
4060         } else {
4061                 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
4062                                   (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
4063                                   adap->port[0]->name, adap);
4064                 if (err)
4065                         goto irq_err;
4066         }
4067         enable_rx(adap);
4068         t4_sge_start(adap);
4069         t4_intr_enable(adap);
4070         adap->flags |= FULL_INIT_DONE;
4071         notify_ulds(adap, CXGB4_STATE_UP);
4072         update_clip(adap);
4073  out:
4074         return err;
4075  irq_err:
4076         dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
4077  freeq:
4078         t4_free_sge_resources(adap);
4079         goto out;
4080 }
4081
4082 static void cxgb_down(struct adapter *adapter)
4083 {
4084         t4_intr_disable(adapter);
4085         cancel_work_sync(&adapter->tid_release_task);
4086         cancel_work_sync(&adapter->db_full_task);
4087         cancel_work_sync(&adapter->db_drop_task);
4088         adapter->tid_release_task_busy = false;
4089         adapter->tid_release_head = NULL;
4090
4091         if (adapter->flags & USING_MSIX) {
4092                 free_msix_queue_irqs(adapter);
4093                 free_irq(adapter->msix_info[0].vec, adapter);
4094         } else
4095                 free_irq(adapter->pdev->irq, adapter);
4096         quiesce_rx(adapter);
4097         t4_sge_stop(adapter);
4098         t4_free_sge_resources(adapter);
4099         adapter->flags &= ~FULL_INIT_DONE;
4100 }
4101
4102 /*
4103  * net_device operations
4104  */
4105 static int cxgb_open(struct net_device *dev)
4106 {
4107         int err;
4108         struct port_info *pi = netdev_priv(dev);
4109         struct adapter *adapter = pi->adapter;
4110
4111         netif_carrier_off(dev);
4112
4113         if (!(adapter->flags & FULL_INIT_DONE)) {
4114                 err = cxgb_up(adapter);
4115                 if (err < 0)
4116                         return err;
4117         }
4118
4119         err = link_start(dev);
4120         if (!err)
4121                 netif_tx_start_all_queues(dev);
4122         return err;
4123 }
4124
4125 static int cxgb_close(struct net_device *dev)
4126 {
4127         struct port_info *pi = netdev_priv(dev);
4128         struct adapter *adapter = pi->adapter;
4129
4130         netif_tx_stop_all_queues(dev);
4131         netif_carrier_off(dev);
4132         return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
4133 }
4134
4135 /* Return an error number if the indicated filter isn't writable ...
4136  */
4137 static int writable_filter(struct filter_entry *f)
4138 {
4139         if (f->locked)
4140                 return -EPERM;
4141         if (f->pending)
4142                 return -EBUSY;
4143
4144         return 0;
4145 }
4146
4147 /* Delete the filter at the specified index (if valid).  This checks for all
4148  * the common problems with doing so, such as the filter being locked or
4149  * currently pending in another operation.
4150  */
4151 static int delete_filter(struct adapter *adapter, unsigned int fidx)
4152 {
4153         struct filter_entry *f;
4154         int ret;
4155
4156         if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
4157                 return -EINVAL;
4158
4159         f = &adapter->tids.ftid_tab[fidx];
4160         ret = writable_filter(f);
4161         if (ret)
4162                 return ret;
4163         if (f->valid)
4164                 return del_filter_wr(adapter, fidx);
4165
4166         return 0;
4167 }
4168
4169 int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
4170                 __be32 sip, __be16 sport, __be16 vlan,
4171                 unsigned int queue, unsigned char port, unsigned char mask)
4172 {
4173         int ret;
4174         struct filter_entry *f;
4175         struct adapter *adap;
4176         int i;
4177         u8 *val;
4178
4179         adap = netdev2adap(dev);
4180
4181         /* Adjust stid to correct filter index */
4182         stid -= adap->tids.nstids;
4183         stid += adap->tids.nftids;
4184
4185         /* Check to make sure the filter requested is writable ...
4186          */
4187         f = &adap->tids.ftid_tab[stid];
4188         ret = writable_filter(f);
4189         if (ret)
4190                 return ret;
4191
4192         /* Clear out any old resources being used by the filter before
4193          * we start constructing the new filter.
4194          */
4195         if (f->valid)
4196                 clear_filter(adap, f);
4197
4198         /* Clear out filter specifications */
4199         memset(&f->fs, 0, sizeof(struct ch_filter_specification));
4200         f->fs.val.lport = be16_to_cpu(sport); /* lport is host order */
4201         f->fs.mask.lport = ~0;
4202         val = (u8 *)&sip;
4203         if ((val[0] | val[1] | val[2] | val[3]) != 0) {
4204                 for (i = 0; i < 4; i++) {
4205                         f->fs.val.lip[i] = val[i];
4206                         f->fs.mask.lip[i] = ~0;
4207                 }
4208                 if (adap->filter_mode & F_PORT) {
4209                         f->fs.val.iport = port;
4210                         f->fs.mask.iport = mask;
4211                 }
4212         }
4213
4214         f->fs.dirsteer = 1;
4215         f->fs.iq = queue;
4216         /* Mark filter as locked */
4217         f->locked = 1;
4218         f->fs.rpttid = 1;
4219
4220         ret = set_filter_wr(adap, stid);
4221         if (ret) {
4222                 clear_filter(adap, f);
4223                 return ret;
4224         }
4225
4226         return 0;
4227 }
4228 EXPORT_SYMBOL(cxgb4_create_server_filter);
4229
4230 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
4231                 unsigned int queue, bool ipv6)
4232 {
4233         int ret;
4234         struct filter_entry *f;
4235         struct adapter *adap;
4236
4237         adap = netdev2adap(dev);
4238
4239         /* Adjust stid to correct filter index */
4240         stid -= adap->tids.nstids;
4241         stid += adap->tids.nftids;
4242
4243         f = &adap->tids.ftid_tab[stid];
4244         /* Unlock the filter */
4245         f->locked = 0;
4246
4247         ret = delete_filter(adap, stid);
4248         if (ret)
4249                 return ret;
4250
4251         return 0;
4252 }
4253 EXPORT_SYMBOL(cxgb4_remove_server_filter);
4254
4255 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
4256                                                 struct rtnl_link_stats64 *ns)
4257 {
4258         struct port_stats stats;
4259         struct port_info *p = netdev_priv(dev);
4260         struct adapter *adapter = p->adapter;
4261
4262         spin_lock(&adapter->stats_lock);
4263         t4_get_port_stats(adapter, p->tx_chan, &stats);
4264         spin_unlock(&adapter->stats_lock);
4265
4266         ns->tx_bytes   = stats.tx_octets;
4267         ns->tx_packets = stats.tx_frames;
4268         ns->rx_bytes   = stats.rx_octets;
4269         ns->rx_packets = stats.rx_frames;
4270         ns->multicast  = stats.rx_mcast_frames;
4271
4272         /* detailed rx_errors */
4273         ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
4274                                stats.rx_runt;
4275         ns->rx_over_errors   = 0;
4276         ns->rx_crc_errors    = stats.rx_fcs_err;
4277         ns->rx_frame_errors  = stats.rx_symbol_err;
4278         ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
4279                                stats.rx_ovflow2 + stats.rx_ovflow3 +
4280                                stats.rx_trunc0 + stats.rx_trunc1 +
4281                                stats.rx_trunc2 + stats.rx_trunc3;
4282         ns->rx_missed_errors = 0;
4283
4284         /* detailed tx_errors */
4285         ns->tx_aborted_errors   = 0;
4286         ns->tx_carrier_errors   = 0;
4287         ns->tx_fifo_errors      = 0;
4288         ns->tx_heartbeat_errors = 0;
4289         ns->tx_window_errors    = 0;
4290
4291         ns->tx_errors = stats.tx_error_frames;
4292         ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
4293                 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
4294         return ns;
4295 }
4296
4297 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
4298 {
4299         unsigned int mbox;
4300         int ret = 0, prtad, devad;
4301         struct port_info *pi = netdev_priv(dev);
4302         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
4303
4304         switch (cmd) {
4305         case SIOCGMIIPHY:
4306                 if (pi->mdio_addr < 0)
4307                         return -EOPNOTSUPP;
4308                 data->phy_id = pi->mdio_addr;
4309                 break;
4310         case SIOCGMIIREG:
4311         case SIOCSMIIREG:
4312                 if (mdio_phy_id_is_c45(data->phy_id)) {
4313                         prtad = mdio_phy_id_prtad(data->phy_id);
4314                         devad = mdio_phy_id_devad(data->phy_id);
4315                 } else if (data->phy_id < 32) {
4316                         prtad = data->phy_id;
4317                         devad = 0;
4318                         data->reg_num &= 0x1f;
4319                 } else
4320                         return -EINVAL;
4321
4322                 mbox = pi->adapter->fn;
4323                 if (cmd == SIOCGMIIREG)
4324                         ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
4325                                          data->reg_num, &data->val_out);
4326                 else
4327                         ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
4328                                          data->reg_num, data->val_in);
4329                 break;
4330         default:
4331                 return -EOPNOTSUPP;
4332         }
4333         return ret;
4334 }
4335
4336 static void cxgb_set_rxmode(struct net_device *dev)
4337 {
4338         /* unfortunately we can't return errors to the stack */
4339         set_rxmode(dev, -1, false);
4340 }
4341
4342 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
4343 {
4344         int ret;
4345         struct port_info *pi = netdev_priv(dev);
4346
4347         if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
4348                 return -EINVAL;
4349         ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
4350                             -1, -1, -1, true);
4351         if (!ret)
4352                 dev->mtu = new_mtu;
4353         return ret;
4354 }
4355
4356 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
4357 {
4358         int ret;
4359         struct sockaddr *addr = p;
4360         struct port_info *pi = netdev_priv(dev);
4361
4362         if (!is_valid_ether_addr(addr->sa_data))
4363                 return -EADDRNOTAVAIL;
4364
4365         ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
4366                             pi->xact_addr_filt, addr->sa_data, true, true);
4367         if (ret < 0)
4368                 return ret;
4369
4370         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4371         pi->xact_addr_filt = ret;
4372         return 0;
4373 }
4374
4375 #ifdef CONFIG_NET_POLL_CONTROLLER
4376 static void cxgb_netpoll(struct net_device *dev)
4377 {
4378         struct port_info *pi = netdev_priv(dev);
4379         struct adapter *adap = pi->adapter;
4380
4381         if (adap->flags & USING_MSIX) {
4382                 int i;
4383                 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
4384
4385                 for (i = pi->nqsets; i; i--, rx++)
4386                         t4_sge_intr_msix(0, &rx->rspq);
4387         } else
4388                 t4_intr_handler(adap)(0, adap);
4389 }
4390 #endif
4391
4392 static const struct net_device_ops cxgb4_netdev_ops = {
4393         .ndo_open             = cxgb_open,
4394         .ndo_stop             = cxgb_close,
4395         .ndo_start_xmit       = t4_eth_xmit,
4396         .ndo_get_stats64      = cxgb_get_stats,
4397         .ndo_set_rx_mode      = cxgb_set_rxmode,
4398         .ndo_set_mac_address  = cxgb_set_mac_addr,
4399         .ndo_set_features     = cxgb_set_features,
4400         .ndo_validate_addr    = eth_validate_addr,
4401         .ndo_do_ioctl         = cxgb_ioctl,
4402         .ndo_change_mtu       = cxgb_change_mtu,
4403 #ifdef CONFIG_NET_POLL_CONTROLLER
4404         .ndo_poll_controller  = cxgb_netpoll,
4405 #endif
4406 };
4407
4408 void t4_fatal_err(struct adapter *adap)
4409 {
4410         t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
4411         t4_intr_disable(adap);
4412         dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
4413 }
4414
4415 static void setup_memwin(struct adapter *adap)
4416 {
4417         u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
4418
4419         bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
4420         if (is_t4(adap->params.chip)) {
4421                 mem_win0_base = bar0 + MEMWIN0_BASE;
4422                 mem_win1_base = bar0 + MEMWIN1_BASE;
4423                 mem_win2_base = bar0 + MEMWIN2_BASE;
4424         } else {
4425                 /* For T5, only relative offset inside the PCIe BAR is passed */
4426                 mem_win0_base = MEMWIN0_BASE;
4427                 mem_win1_base = MEMWIN1_BASE_T5;
4428                 mem_win2_base = MEMWIN2_BASE_T5;
4429         }
4430         t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
4431                      mem_win0_base | BIR(0) |
4432                      WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
4433         t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
4434                      mem_win1_base | BIR(0) |
4435                      WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
4436         t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
4437                      mem_win2_base | BIR(0) |
4438                      WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
4439 }
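
     /*
      * Worked example (added for illustration): the WINDOW field encodes the
      * aperture as log2(bytes) - 10, i.e. in power-of-2 multiples of 1KB, so
      * a 64KB window would be written as ilog2(65536) - 10 = 6.  The actual
      * MEMWIN*_APERTURE sizes are defined in the driver headers.
      */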
4440
4441 static void setup_memwin_rdma(struct adapter *adap)
4442 {
4443         if (adap->vres.ocq.size) {
4444                 unsigned int start, sz_kb;
4445
4446                 start = pci_resource_start(adap->pdev, 2) +
4447                         OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
4448                 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
4449                 t4_write_reg(adap,
4450                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
4451                              start | BIR(1) | WINDOW(ilog2(sz_kb)));
4452                 t4_write_reg(adap,
4453                              PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
4454                              adap->vres.ocq.start);
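                     /* read the register back to flush the posted write */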
4455                 t4_read_reg(adap,
4456                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
4457         }
4458 }
4459
4460 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
4461 {
4462         u32 v;
4463         int ret;
4464
4465         /* get device capabilities */
4466         memset(c, 0, sizeof(*c));
4467         c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4468                                FW_CMD_REQUEST | FW_CMD_READ);
4469         c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
4470         ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
4471         if (ret < 0)
4472                 return ret;
4473
4474         /* select capabilities we'll be using */
4475         if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4476                 if (!vf_acls)
4477                         c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4478                 else
4479                         c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4480         } else if (vf_acls) {
4481                 dev_err(adap->pdev_dev, "virtualization ACLs not supported\n");
4482                 return -EINVAL; /* "ret" is 0 here after the successful read above */
4483         }
4484         c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4485                                FW_CMD_REQUEST | FW_CMD_WRITE);
4486         ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
4487         if (ret < 0)
4488                 return ret;
4489
4490         ret = t4_config_glbl_rss(adap, adap->fn,
4491                                  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4492                                  FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4493                                  FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
4494         if (ret < 0)
4495                 return ret;
4496
4497         ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
4498                           0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
4499         if (ret < 0)
4500                 return ret;
4501
4502         t4_sge_init(adap);
4503
4504         /* tweak some settings */
4505         t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
4506         t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
4507         t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
4508         v = t4_read_reg(adap, TP_PIO_DATA);
4509         t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
4510
4511         /* first 4 Tx modulation queues point to consecutive Tx channels */
4512         adap->params.tp.tx_modq_map = 0xE4;
4513         t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
4514                      V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));
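             /*
              * 0xE4 is 0b11100100: four 2-bit fields read from the LSB up,
              * so modulation queue 0 maps to channel 0, queue 1 to channel 1,
              * and so on -- the identity mapping described above.
              */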
4515
4516         /* associate each Tx modulation queue with consecutive Tx channels */
4517         v = 0x84218421;
4518         t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4519                           &v, 1, A_TP_TX_SCHED_HDR);
4520         t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4521                           &v, 1, A_TP_TX_SCHED_FIFO);
4522         t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
4523                           &v, 1, A_TP_TX_SCHED_PCMD);
4524
4525 #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
4526         if (is_offload(adap)) {
4527                 t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
4528                              V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4529                              V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4530                              V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4531                              V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4532                 t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
4533                              V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4534                              V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4535                              V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
4536                              V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
4537         }
4538
4539         /* get basic stuff going */
4540         return t4_early_init(adap, adap->fn);
4541 }
4542
4543 /*
4544  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
4545  */
4546 #define MAX_ATIDS 8192U
4547
4555 /*
4556  * Tweak configuration based on module parameters, etc.  Most of these have
4557  * defaults assigned to them by Firmware Configuration Files (if we're using
4558  * them) but need to be explicitly set if we're using hard-coded
4559  * initialization.  But even in the case of using Firmware Configuration
4560  * Files, we'd like to expose the ability to change these via module
4561  * parameters so these are essentially common tweaks/settings for
4562  * Configuration Files and hard-coded initialization ...
4563  */
4564 static int adap_init0_tweaks(struct adapter *adapter)
4565 {
4566         /*
4567          * Fix up various Host-Dependent Parameters like Page Size, Cache
4568          * Line Size, etc.  The firmware default is for a 4KB Page Size and
4569          * 64B Cache Line Size ...
4570          */
4571         t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
4572
4573         /*
4574          * Process module parameters which affect early initialization.
4575          */
4576         if (rx_dma_offset != 2 && rx_dma_offset != 0) {
4577                 dev_err(&adapter->pdev->dev,
4578                         "Ignoring illegal rx_dma_offset=%d, using 2\n",
4579                         rx_dma_offset);
4580                 rx_dma_offset = 2;
4581         }
4582         t4_set_reg_field(adapter, SGE_CONTROL,
4583                          PKTSHIFT_MASK,
4584                          PKTSHIFT(rx_dma_offset));
4585
4586         /*
4587          * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
4588          * adds the pseudo header itself.
4589          */
4590         t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
4591                                CSUM_HAS_PSEUDO_HDR, 0);
4592
4593         return 0;
4594 }
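
     /*
      * A sketch of what the PKTSHIFT tweak above buys us: with
      * rx_dma_offset = 2, ingress packet data is DMA'd two bytes into the
      * receive buffer, so the 14-byte Ethernet header leaves the IP header
      * aligned -- the usual NET_IP_ALIGN-style trick.
      */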
4595
4596 /*
4597  * Attempt to initialize the adapter via a Firmware Configuration File.
4598  */
4599 static int adap_init0_config(struct adapter *adapter, int reset)
4600 {
4601         struct fw_caps_config_cmd caps_cmd;
4602         const struct firmware *cf;
4603         unsigned long mtype = 0, maddr = 0;
4604         u32 finiver, finicsum, cfcsum;
4605         int ret;
4606         int config_issued = 0;
4607         char *fw_config_file, fw_config_file_path[256];
4608         char *config_name = NULL;
4609
4610         /*
4611          * Reset device if necessary.
4612          */
4613         if (reset) {
4614                 ret = t4_fw_reset(adapter, adapter->mbox,
4615                                   PIORSTMODE | PIORST);
4616                 if (ret < 0)
4617                         goto bye;
4618         }
4619
4620         /*
4621          * If we have a T4 configuration file under /lib/firmware/cxgb4/,
4622          * then use that.  Otherwise, use the configuration file stored
4623          * in the adapter flash ...
4624          */
4625         switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
4626         case CHELSIO_T4:
4627                 fw_config_file = FW4_CFNAME;
4628                 break;
4629         case CHELSIO_T5:
4630                 fw_config_file = FW5_CFNAME;
4631                 break;
4632         default:
4633                 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4634                        adapter->pdev->device);
4635                 ret = -EINVAL;
4636                 goto bye;
4637         }
4638
4639         ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
4640         if (ret < 0) {
4641                 config_name = "On FLASH";
4642                 mtype = FW_MEMTYPE_CF_FLASH;
4643                 maddr = t4_flash_cfg_addr(adapter);
4644         } else {
4645                 u32 params[7], val[7];
4646
4647                 snprintf(fw_config_file_path, sizeof(fw_config_file_path),
4648                          "/lib/firmware/%s", fw_config_file);
4649                 config_name = fw_config_file_path;
4650
4651                 if (cf->size >= FLASH_CFG_MAX_SIZE) {
4652                         ret = -ENOMEM;
4653                 } else {
4654                         params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
4655                              FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
4656                         ret = t4_query_params(adapter, adapter->mbox,
4657                                               adapter->fn, 0, 1, params, val);
4658                         if (ret == 0) {
4659                                 /*
4660                                  * For t4_memory_write() below addresses and
4661                                  * sizes have to be in terms of multiples of 4
4662                                  * bytes.  So, if the Configuration File isn't
4663                                  * a multiple of 4 bytes in length we'll have
4664                                  * to write that out separately since we can't
4665                                  * guarantee that the bytes following the
4666                                  * residual byte in the buffer returned by
4667                                  * request_firmware() are zeroed out ...
4668                                  */
4669                                 size_t resid = cf->size & 0x3;
4670                                 size_t size = cf->size & ~0x3;
4671                                 __be32 *data = (__be32 *)cf->data;
4672
4673                                 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
4674                                 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
4675
4676                                 ret = t4_memory_write(adapter, mtype, maddr,
4677                                                       size, data);
4678                                 if (ret == 0 && resid != 0) {
4679                                         union {
4680                                                 __be32 word;
4681                                                 char buf[4];
4682                                         } last;
4683                                         /*
4684                                          * Copy just the "resid" valid
                                              * tail bytes: reading a whole
                                              * word at data[size >> 2] could
                                              * read past the end of the buffer
                                              * returned by request_firmware().
                                              */
4685                                         last.word = 0;
                                             memcpy(last.buf, cf->data + size,
                                                    resid);
4688                                         ret = t4_memory_write(adapter, mtype,
4689                                                               maddr + size,
4690                                                               4, &last.word);
4691                                 }
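                                 /*
                                  * Worked example (illustrative): a 1027-byte
                                  * Configuration File writes 1024 bytes in the
                                  * main transfer, then the 3 residual bytes
                                  * are copied into a zeroed word and written
                                  * as the final 4-byte chunk.
                                  */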
4692                         }
4693                 }
4694
4695                 release_firmware(cf);
4696                 if (ret)
4697                         goto bye;
4698         }
4699
4700         /*
4701          * Issue a Capability Configuration command to the firmware to get it
4702          * to parse the Configuration File.  We don't use t4_fw_config_file()
4703          * because we want the ability to modify various features after we've
4704          * processed the configuration file ...
4705          */
4706         memset(&caps_cmd, 0, sizeof(caps_cmd));
4707         caps_cmd.op_to_write =
4708                 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4709                       FW_CMD_REQUEST |
4710                       FW_CMD_READ);
4711         caps_cmd.cfvalid_to_len16 =
4712                 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
4713                       FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
4714                       FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
4715                       FW_LEN16(caps_cmd));
4716         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4717                          &caps_cmd);
4718
4719         /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
4720          * Configuration File in FLASH), our last gasp effort is to use the
4721          * Firmware Configuration File which is embedded in the firmware.  A
4722          * very few early versions of the firmware didn't have one embedded
4723          * but we can ignore those.
4724          */
4725         if (ret == -ENOENT) {
4726                 memset(&caps_cmd, 0, sizeof(caps_cmd));
4727                 caps_cmd.op_to_write =
4728                         htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4729                                         FW_CMD_REQUEST |
4730                                         FW_CMD_READ);
4731                 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4732                 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
4733                                 sizeof(caps_cmd), &caps_cmd);
4734                 config_name = "Firmware Default";
4735         }
4736
4737         config_issued = 1;
4738         if (ret < 0)
4739                 goto bye;
4740
4741         finiver = ntohl(caps_cmd.finiver);
4742         finicsum = ntohl(caps_cmd.finicsum);
4743         cfcsum = ntohl(caps_cmd.cfcsum);
4744         if (finicsum != cfcsum)
4745                 dev_warn(adapter->pdev_dev,
4746                          "Configuration File checksum mismatch: [fini] csum=%#x, computed csum=%#x\n",
4747                          finicsum, cfcsum);
4748
4749         /*
4750          * And now tell the firmware to use the configuration we just loaded.
4751          */
4752         caps_cmd.op_to_write =
4753                 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4754                       FW_CMD_REQUEST |
4755                       FW_CMD_WRITE);
4756         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4757         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4758                          NULL);
4759         if (ret < 0)
4760                 goto bye;
4761
4762         /*
4763          * Tweak configuration based on system architecture, module
4764          * parameters, etc.
4765          */
4766         ret = adap_init0_tweaks(adapter);
4767         if (ret < 0)
4768                 goto bye;
4769
4770         /*
4771          * And finally tell the firmware to initialize itself using the
4772          * parameters from the Configuration File.
4773          */
4774         ret = t4_fw_initialize(adapter, adapter->mbox);
4775         if (ret < 0)
4776                 goto bye;
4777
4778         /*
4779          * Return successfully and note that we're operating with parameters
4780          * supplied by the Configuration File rather than by hard-wired
4781          * initialization constants buried in the driver.
4782          */
4783         adapter->flags |= USING_SOFT_PARAMS;
4784         dev_info(adapter->pdev_dev, "Successfully configured using Firmware "
4785                  "Configuration File \"%s\", version %#x, computed checksum %#x\n",
4786                  config_name, finiver, cfcsum);
4787         return 0;
4788
4789         /*
4790          * Something bad happened.  Return the error ...  (If the "error"
4791          * is that there's no Configuration File on the adapter we don't
4792          * want to issue a warning since this is fairly common.)
4793          */
4794 bye:
4795         if (config_issued && ret != -ENOENT)
4796                 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
4797                          config_name, -ret);
4798         return ret;
4799 }
4800
4801 /*
4802  * Attempt to initialize the adapter via hard-coded, driver supplied
4803  * parameters ...
4804  */
4805 static int adap_init0_no_config(struct adapter *adapter, int reset)
4806 {
4807         struct sge *s = &adapter->sge;
4808         struct fw_caps_config_cmd caps_cmd;
4809         u32 v;
4810         int i, ret;
4811
4812         /*
4813          * Reset device if necessary
4814          */
4815         if (reset) {
4816                 ret = t4_fw_reset(adapter, adapter->mbox,
4817                                   PIORSTMODE | PIORST);
4818                 if (ret < 0)
4819                         goto bye;
4820         }
4821
4822         /*
4823          * Get device capabilities and select which we'll be using.
4824          */
4825         memset(&caps_cmd, 0, sizeof(caps_cmd));
4826         caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4827                                      FW_CMD_REQUEST | FW_CMD_READ);
4828         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
4829         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4830                          &caps_cmd);
4831         if (ret < 0)
4832                 goto bye;
4833
4834         if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
4835                 if (!vf_acls)
4836                         caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
4837                 else
4838                         caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
4839         } else if (vf_acls) {
4840                 dev_err(adapter->pdev_dev, "virtualization ACLs not supported\n");
                     ret = -EINVAL;  /* don't fall through to "bye" with ret == 0 */
4841                 goto bye;
4842         }
4843         caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
4844                               FW_CMD_REQUEST | FW_CMD_WRITE);
4845         ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
4846                          NULL);
4847         if (ret < 0)
4848                 goto bye;
4849
4850         /*
4851          * Tweak configuration based on system architecture, module
4852          * parameters, etc.
4853          */
4854         ret = adap_init0_tweaks(adapter);
4855         if (ret < 0)
4856                 goto bye;
4857
4858         /*
4859          * Select RSS Global Mode we want to use.  We use "Basic Virtual"
4860          * mode which maps each Virtual Interface to its own section of
4861          * the RSS Table and we turn on all map and hash enables ...
4862          */
4863         adapter->flags |= RSS_TNLALLLOOKUP;
4864         ret = t4_config_glbl_rss(adapter, adapter->mbox,
4865                                  FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
4866                                  FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
4867                                  FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
4868                                  ((adapter->flags & RSS_TNLALLLOOKUP) ?
4869                                         FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
4870         if (ret < 0)
4871                 goto bye;
4872
4873         /*
4874          * Set up our own fundamental resource provisioning ...
4875          */
4876         ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
4877                           PFRES_NEQ, PFRES_NETHCTRL,
4878                           PFRES_NIQFLINT, PFRES_NIQ,
4879                           PFRES_TC, PFRES_NVI,
4880                           FW_PFVF_CMD_CMASK_MASK,
4881                           pfvfres_pmask(adapter, adapter->fn, 0),
4882                           PFRES_NEXACTF,
4883                           PFRES_R_CAPS, PFRES_WX_CAPS);
4884         if (ret < 0)
4885                 goto bye;
4886
4887         /*
4888          * Perform low level SGE initialization.  We need to do this before we
4889          * send the firmware the INITIALIZE command, because that releases
4890          * any other PF Drivers which are waiting for the Master
4891          * Initialization to proceed.
4892          */
4893         for (i = 0; i < SGE_NTIMERS - 1; i++)
4894                 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
4895         s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
4896         s->counter_val[0] = 1;
4897         for (i = 1; i < SGE_NCOUNTERS; i++)
4898                 s->counter_val[i] = min(intr_cnt[i - 1],
4899                                         THRESHOLD_0_GET(THRESHOLD_0_MASK));
4900         t4_sge_init(adapter);
4901
4902 #ifdef CONFIG_PCI_IOV
4903         /*
4904          * Provision resource limits for Virtual Functions.  We currently
4905          * grant them all the same static resource limits except for the Port
4906          * Access Rights Mask which we're assigning based on the PF.  All of
4907          * the static provisioning stuff for both the PF and VF really needs
4908          * to be managed in a persistent manner for each device which the
4909          * firmware controls.
4910          */
4911         {
4912                 int pf, vf;
4913
4914                 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
4915                         if (num_vf[pf] <= 0)
4916                                 continue;
4917
4918                         /* VF numbering starts at 1! */
4919                         for (vf = 1; vf <= num_vf[pf]; vf++) {
4920                                 ret = t4_cfg_pfvf(adapter, adapter->mbox,
4921                                                   pf, vf,
4922                                                   VFRES_NEQ, VFRES_NETHCTRL,
4923                                                   VFRES_NIQFLINT, VFRES_NIQ,
4924                                                   VFRES_TC, VFRES_NVI,
4925                                                   FW_PFVF_CMD_CMASK_MASK,
4926                                                   pfvfres_pmask(
4927                                                   adapter, pf, vf),
4928                                                   VFRES_NEXACTF,
4929                                                   VFRES_R_CAPS, VFRES_WX_CAPS);
4930                                 if (ret < 0)
4931                                         dev_warn(adapter->pdev_dev,
4932                                                  "failed to provision pf/vf=%d/%d; err=%d\n",
4933                                                  pf, vf, ret);
4935                         }
4936                 }
4937         }
4938 #endif
4939
4940         /*
4941          * Set up the default filter mode.  Later we'll want to implement this
4942          * via a firmware command, etc. ...  This needs to be done before the
4943          * firmware initialization command ...  If the selected set of fields
4944          * isn't equal to the default value, we'll need to make sure that the
4945          * field selections will fit in the 36-bit budget.
4946          */
4947         if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
4948                 int j, bits = 0;
4949
4950                 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
4951                         switch (tp_vlan_pri_map & (1 << j)) {
4952                         case 0:
4953                                 /* compressed filter field not enabled */
4954                                 break;
4955                         case FCOE_MASK:
4956                                 bits +=  1;
4957                                 break;
4958                         case PORT_MASK:
4959                                 bits +=  3;
4960                                 break;
4961                         case VNIC_ID_MASK:
4962                                 bits += 17;
4963                                 break;
4964                         case VLAN_MASK:
4965                                 bits += 17;
4966                                 break;
4967                         case TOS_MASK:
4968                                 bits +=  8;
4969                                 break;
4970                         case PROTOCOL_MASK:
4971                                 bits +=  8;
4972                                 break;
4973                         case ETHERTYPE_MASK:
4974                                 bits += 16;
4975                                 break;
4976                         case MACMATCH_MASK:
4977                                 bits +=  9;
4978                                 break;
4979                         case MPSHITTYPE_MASK:
4980                                 bits +=  3;
4981                                 break;
4982                         case FRAGMENTATION_MASK:
4983                                 bits +=  1;
4984                                 break;
4985                         }
4986
4987                 if (bits > 36) {
4988                         dev_err(adapter->pdev_dev,
4989                                 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
4990                                 " using %#x\n", tp_vlan_pri_map, bits,
4991                                 TP_VLAN_PRI_MAP_DEFAULT);
4992                         tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
4993                 }
4994         }
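
             /*
              * Worked example (illustrative): selecting PORT (3) + VLAN (17) +
              * PROTOCOL (8) + ETHERTYPE (16) would need 44 bits and be
              * rejected above, while PORT + VLAN + PROTOCOL = 28 bits fits
              * the 36-bit compressed filter tuple.
              */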
4995         v = tp_vlan_pri_map;
4996         t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
4997                           &v, 1, TP_VLAN_PRI_MAP);
4998
4999         /*
5000          * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5001          * to support any of the compressed filter fields above.  Newer
5002          * versions of the firmware do this automatically but it doesn't hurt
5003          * to set it here.  Meanwhile, we do _not_ need to set Lookup Every
5004          * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5005          * since the firmware automatically turns this on and off when we have
5006          * a non-zero number of filters active (since it does have a
5007          * performance impact).
5008          */
5009         if (tp_vlan_pri_map)
5010                 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5011                                  FIVETUPLELOOKUP_MASK,
5012                                  FIVETUPLELOOKUP_MASK);
5013
5014         /*
5015          * Tweak some settings.
5016          */
5017         t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5018                      RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5019                      PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5020                      KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5021
5022         /*
5023          * Get basic stuff going by issuing the Firmware Initialize command.
5024          * Note that this _must_ be after all PFVF commands ...
5025          */
5026         ret = t4_fw_initialize(adapter, adapter->mbox);
5027         if (ret < 0)
5028                 goto bye;
5029
5030         /*
5031          * Return successfully!
5032          */
5033         dev_info(adapter->pdev_dev,
5034                  "Successfully configured using built-in driver parameters\n");
5035         return 0;
5036
5037         /*
5038          * Something bad happened.  Return the error ...
5039          */
5040 bye:
5041         return ret;
5042 }
5043
5044 static struct fw_info fw_info_array[] = {
5045         {
5046                 .chip = CHELSIO_T4,
5047                 .fs_name = FW4_CFNAME,
5048                 .fw_mod_name = FW4_FNAME,
5049                 .fw_hdr = {
5050                         .chip = FW_HDR_CHIP_T4,
5051                         .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5052                         .intfver_nic = FW_INTFVER(T4, NIC),
5053                         .intfver_vnic = FW_INTFVER(T4, VNIC),
5054                         .intfver_ri = FW_INTFVER(T4, RI),
5055                         .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5056                         .intfver_fcoe = FW_INTFVER(T4, FCOE),
5057                 },
5058         }, {
5059                 .chip = CHELSIO_T5,
5060                 .fs_name = FW5_CFNAME,
5061                 .fw_mod_name = FW5_FNAME,
5062                 .fw_hdr = {
5063                         .chip = FW_HDR_CHIP_T5,
5064                         .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5065                         .intfver_nic = FW_INTFVER(T5, NIC),
5066                         .intfver_vnic = FW_INTFVER(T5, VNIC),
5067                         .intfver_ri = FW_INTFVER(T5, RI),
5068                         .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5069                         .intfver_fcoe = FW_INTFVER(T5, FCOE),
5070                 },
5071         }
5072 };
5073
5074 static struct fw_info *find_fw_info(int chip)
5075 {
5076         int i;
5077
5078         for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5079                 if (fw_info_array[i].chip == chip)
5080                         return &fw_info_array[i];
5081         }
5082         return NULL;
5083 }
5084
5085 /*
5086  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
      *
      * If the firmware we're dealing with has Configuration File support, then
      * we use that to perform all configuration.
5087  */
5088 static int adap_init0(struct adapter *adap)
5089 {
5090         int ret;
5091         u32 v, port_vec;
5092         enum dev_state state;
5093         u32 params[7], val[7];
5094         struct fw_caps_config_cmd caps_cmd;
5095         int reset = 1, j;
5096
5097         /*
5098          * Contact FW, advertising Master capability (and potentially forcing
5099          * ourselves as the Master PF if our module parameter force_init is
5100          * set).
5101          */
5102         ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5103                           force_init ? MASTER_MUST : MASTER_MAY,
5104                           &state);
5105         if (ret < 0) {
5106                 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5107                         ret);
5108                 return ret;
5109         }
5110         if (ret == adap->mbox)
5111                 adap->flags |= MASTER_PF;
5112         if (force_init && state == DEV_STATE_INIT)
5113                 state = DEV_STATE_UNINIT;
5114
5115         /*
5116          * If we're the Master PF Driver and the device is uninitialized,
5117          * then let's consider upgrading the firmware ...  (We always want
5118          * to check the firmware version number in order to A. get it for
5119          * later reporting and B. to warn if the currently loaded firmware
5120          * is excessively mismatched relative to the driver.)
5121          */
5122         t4_get_fw_version(adap, &adap->params.fw_vers);
5123         t4_get_tp_version(adap, &adap->params.tp_vers);
5124         if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5125                 struct fw_info *fw_info;
5126                 struct fw_hdr *card_fw;
5127                 const struct firmware *fw;
5128                 const u8 *fw_data = NULL;
5129                 unsigned int fw_size = 0;
5130
5131                 /* This is the firmware whose headers the driver was compiled
5132                  * against
5133                  */
5134                 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5135                 if (fw_info == NULL) {
5136                         dev_err(adap->pdev_dev,
5137                                 "unable to get firmware info for chip %d.\n",
5138                                 CHELSIO_CHIP_VERSION(adap->params.chip));
5139                         return -EINVAL;
5140                 }
5141
5142                 /* allocate memory to read the header of the firmware on the
5143                  * card
5144                  */
5145                 card_fw = t4_alloc_mem(sizeof(*card_fw));
                     if (!card_fw) {
                             ret = -ENOMEM;
                             goto bye;
                     }
5146
5147                 /* Get FW from /lib/firmware/ */
5148                 ret = request_firmware(&fw, fw_info->fw_mod_name,
5149                                        adap->pdev_dev);
5150                 if (ret < 0) {
5151                         dev_err(adap->pdev_dev,
5152                                 "unable to load firmware image %s, error %d\n",
5153                                 fw_info->fw_mod_name, ret);
5154                 } else {
5155                         fw_data = fw->data;
5156                         fw_size = fw->size;
5157                 }
5158
5159                 /* upgrade FW logic */
5160                 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5161                                  state, &reset);
5162
5163                 /* Cleaning up */
5164                 if (fw != NULL)
5165                         release_firmware(fw);
5166                 t4_free_mem(card_fw);
5167
5168                 if (ret < 0)
5169                         goto bye;
5170         }
5171
5172         /*
5173          * Grab VPD parameters.  This should be done after we establish a
5174          * connection to the firmware since some of the VPD parameters
5175          * (notably the Core Clock frequency) are retrieved via requests to
5176          * the firmware.  On the other hand, we need these fairly early on
5177          * so we do this right after getting ahold of the firmware.
5178          */
5179         ret = get_vpd_params(adap, &adap->params.vpd);
5180         if (ret < 0)
5181                 goto bye;
5182
5183         /*
5184          * Find out what ports are available to us.  Note that we need to do
5185          * this before calling adap_init0_no_config() since it needs nports
5186          * and portvec ...
5187          */
5188         v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5189             FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5191         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5192         if (ret < 0)
5193                 goto bye;
5194
5195         adap->params.nports = hweight32(port_vec);
5196         adap->params.portvec = port_vec;
5197
5198         /*
5199          * If the firmware is initialized already (and we're not forcing a
5200          * master initialization), note that we're living with existing
5201          * adapter parameters.  Otherwise, it's time to try initializing the
5202          * adapter ...
5203          */
5204         if (state == DEV_STATE_INIT) {
5205                 dev_info(adap->pdev_dev,
5206                          "Coming up as %s: Adapter already initialized\n",
5207                          adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5208                 adap->flags |= USING_SOFT_PARAMS;
5209         } else {
5210                 dev_info(adap->pdev_dev,
5211                          "Coming up as MASTER: Initializing adapter\n");
5212
5220                 if (force_old_init) {
5221                         ret = adap_init0_no_config(adap, reset);
5222                 } else {
5223                         /*
5224                          * Find out whether we're dealing with a version of
5225                          * the firmware which has configuration file support.
5226                          */
5227                         params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5228                                      FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5229                         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5230                                               params, val);
5231
5232                         /*
5233                          * If the firmware doesn't support Configuration
5234                          * Files, use the old Driver-based, hard-wired
5235                          * initialization.  Otherwise, try using the
5236                          * Configuration File support and fall back to the
5237                          * Driver-based initialization if there's no
5238                          * Configuration File found.
5239                          */
5240                         if (ret < 0) {
                                     dev_warn(adap->pdev_dev,
                                              "Firmware doesn't support Configuration Files, using hard-wired initialization\n");
5241                                 ret = adap_init0_no_config(adap, reset);
5242                         } else {
5243                                 /*
5244                                  * The firmware provides us with a memory
5245                                  * buffer where we can load a Configuration
5246                                  * File from the host if we want to override
5247                                  * the Configuration File in flash.
5248                                  */
5249
5250                                 ret = adap_init0_config(adap, reset);
5251                                 if (ret == -ENOENT) {
5252                                         dev_info(adap->pdev_dev,
5253                                             "No Configuration File present "
5254                                             "on adapter. Using hard-wired "
5255                                             "configuration parameters.\n");
5256                                         ret = adap_init0_no_config(adap, reset);
5257                                 }
5258                         }
5259                 }
5260                 if (ret < 0) {
5261                         dev_err(adap->pdev_dev,
5262                                 "could not initialize adapter, error %d\n",
5263                                 -ret);
5264                         goto bye;
5265                 }
5266         }
5267
5268         /*
5269          * If we're living with non-hard-coded parameters (either from a
5270          * Firmware Configuration File or values programmed by a different PF
5271          * Driver), give the SGE code a chance to pull in anything that it
5272          * needs ...  Note that this must be called after we retrieve our VPD
5273          * parameters in order to know how to convert core ticks to seconds.
5274          */
5275         if (adap->flags & USING_SOFT_PARAMS) {
5276                 ret = t4_sge_init(adap);
5277                 if (ret < 0)
5278                         goto bye;
5279         }
5280
5281         if (is_bypass_device(adap->pdev->device))
5282                 adap->params.bypass = 1;
5283
5284         /*
5285          * Grab some of our basic fundamental operating parameters.
5286          */
5287 #define FW_PARAM_DEV(param) \
5288         (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5289         FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5290
5291 #define FW_PARAM_PFVF(param) \
5292         (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5293          FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
5294          FW_PARAMS_PARAM_Y(0) | \
5295          FW_PARAMS_PARAM_Z(0))
5296
5297         params[0] = FW_PARAM_PFVF(EQ_START);
5298         params[1] = FW_PARAM_PFVF(L2T_START);
5299         params[2] = FW_PARAM_PFVF(L2T_END);
5300         params[3] = FW_PARAM_PFVF(FILTER_START);
5301         params[4] = FW_PARAM_PFVF(FILTER_END);
5302         params[5] = FW_PARAM_PFVF(IQFLINT_START);
5303         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5304         if (ret < 0)
5305                 goto bye;
5306         adap->sge.egr_start = val[0];
5307         adap->l2t_start = val[1];
5308         adap->l2t_end = val[2];
5309         adap->tids.ftid_base = val[3];
5310         adap->tids.nftids = val[4] - val[3] + 1;
5311         adap->sge.ingr_start = val[5];
5312
5313         /* query params related to active filter region */
5314         params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5315         params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5316         ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5317         /* If the active filter region is non-empty we can establish
5318          * offloaded connections through firmware work requests.
5319          */
5320         if ((val[0] != val[1]) && (ret >= 0)) {
5321                 adap->flags |= FW_OFLD_CONN;
5322                 adap->tids.aftid_base = val[0];
5323                 adap->tids.aftid_end = val[1];
5324         }
5325
5326         /* If we're running on newer firmware, let it know that we're
5327          * prepared to deal with encapsulated CPL messages.  Older
5328          * firmware won't understand this and we'll just get
5329          * unencapsulated messages ...
5330          */
5331         params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5332         val[0] = 1;
5333         (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5334
5335         /*
5336          * Get device capabilities so we can determine what resources we need
5337          * to manage.
5338          */
5339         memset(&caps_cmd, 0, sizeof(caps_cmd));
5340         caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5341                                      FW_CMD_REQUEST | FW_CMD_READ);
5342         caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5343         ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5344                          &caps_cmd);
5345         if (ret < 0)
5346                 goto bye;
5347
5348         if (caps_cmd.ofldcaps) {
5349                 /* query offload-related parameters */
5350                 params[0] = FW_PARAM_DEV(NTID);
5351                 params[1] = FW_PARAM_PFVF(SERVER_START);
5352                 params[2] = FW_PARAM_PFVF(SERVER_END);
5353                 params[3] = FW_PARAM_PFVF(TDDP_START);
5354                 params[4] = FW_PARAM_PFVF(TDDP_END);
5355                 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5356                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5357                                       params, val);
5358                 if (ret < 0)
5359                         goto bye;
5360                 adap->tids.ntids = val[0];
5361                 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5362                 adap->tids.stid_base = val[1];
5363                 adap->tids.nstids = val[2] - val[1] + 1;
5364                 /*
5365                  * Set up the server filter region.  Divide the available
5366                  * filter region into two parts: regular filters get 1/3rd
5367                  * and server filters get 2/3rds.  This is only done when
5368                  * the workaround (FW_OFLD_CONN) path is enabled.
5369                  * 1. Regular filters.
5370                  * 2. Server filters: special filters used to redirect SYN
5371                  *    packets to the offload queue.
5372                  */
5373                 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5374                         adap->tids.sftid_base = adap->tids.ftid_base +
5375                                         DIV_ROUND_UP(adap->tids.nftids, 3);
5376                         adap->tids.nsftids = adap->tids.nftids -
5377                                          DIV_ROUND_UP(adap->tids.nftids, 3);
5378                         adap->tids.nftids = adap->tids.sftid_base -
5379                                                 adap->tids.ftid_base;
5380                 }
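                     /*
                      * e.g. with nftids = 3000 the split above leaves 1000
                      * regular filter IDs and 2000 server filter IDs starting
                      * at sftid_base (illustrative numbers).
                      */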
5381                 adap->vres.ddp.start = val[3];
5382                 adap->vres.ddp.size = val[4] - val[3] + 1;
5383                 adap->params.ofldq_wr_cred = val[5];
5384
5385                 adap->params.offload = 1;
5386         }
5387         if (caps_cmd.rdmacaps) {
5388                 params[0] = FW_PARAM_PFVF(STAG_START);
5389                 params[1] = FW_PARAM_PFVF(STAG_END);
5390                 params[2] = FW_PARAM_PFVF(RQ_START);
5391                 params[3] = FW_PARAM_PFVF(RQ_END);
5392                 params[4] = FW_PARAM_PFVF(PBL_START);
5393                 params[5] = FW_PARAM_PFVF(PBL_END);
5394                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5395                                       params, val);
5396                 if (ret < 0)
5397                         goto bye;
5398                 adap->vres.stag.start = val[0];
5399                 adap->vres.stag.size = val[1] - val[0] + 1;
5400                 adap->vres.rq.start = val[2];
5401                 adap->vres.rq.size = val[3] - val[2] + 1;
5402                 adap->vres.pbl.start = val[4];
5403                 adap->vres.pbl.size = val[5] - val[4] + 1;
5404
5405                 params[0] = FW_PARAM_PFVF(SQRQ_START);
5406                 params[1] = FW_PARAM_PFVF(SQRQ_END);
5407                 params[2] = FW_PARAM_PFVF(CQ_START);
5408                 params[3] = FW_PARAM_PFVF(CQ_END);
5409                 params[4] = FW_PARAM_PFVF(OCQ_START);
5410                 params[5] = FW_PARAM_PFVF(OCQ_END);
5411                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
                                           params, val);
5412                 if (ret < 0)
5413                         goto bye;
5414                 adap->vres.qp.start = val[0];
5415                 adap->vres.qp.size = val[1] - val[0] + 1;
5416                 adap->vres.cq.start = val[2];
5417                 adap->vres.cq.size = val[3] - val[2] + 1;
5418                 adap->vres.ocq.start = val[4];
5419                 adap->vres.ocq.size = val[5] - val[4] + 1;
5420         }
5421         if (caps_cmd.iscsicaps) {
5422                 params[0] = FW_PARAM_PFVF(ISCSI_START);
5423                 params[1] = FW_PARAM_PFVF(ISCSI_END);
5424                 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
5425                                       params, val);
5426                 if (ret < 0)
5427                         goto bye;
5428                 adap->vres.iscsi.start = val[0];
5429                 adap->vres.iscsi.size = val[1] - val[0] + 1;
5430         }
5431 #undef FW_PARAM_PFVF
5432 #undef FW_PARAM_DEV
5433
5434         /*
5435          * These are finalized by FW initialization, so load their values now.
5436          */
5437         v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
5438         adap->params.tp.tre = TIMERRESOLUTION_GET(v);
5439         adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
5440         t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
5441         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5442                      adap->params.b_wnd);
5443
5444         /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
5445         for (j = 0; j < NCHAN; j++)
5446                 adap->params.tp.tx_modq[j] = j;
5447
5448         t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
5449                          &adap->filter_mode, 1,
5450                          TP_VLAN_PRI_MAP);
5451
5452         adap->flags |= FW_OK;
5453         return 0;
5454
5455         /*
5456          * Something bad happened.  If a command timed out or failed with EIO,
5457          * the FW is not operating within its spec or something catastrophic
5458          * happened to the HW/FW, so stop issuing commands.
5459          */
5460 bye:
5461         if (ret != -ETIMEDOUT && ret != -EIO)
5462                 t4_fw_bye(adap, adap->mbox);
5463         return ret;
5464 }
5465
5466 /* EEH callbacks */
5467
5468 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
5469                                          pci_channel_state_t state)
5470 {
5471         int i;
5472         struct adapter *adap = pci_get_drvdata(pdev);
5473
5474         if (!adap)
5475                 goto out;
5476
5477         rtnl_lock();
5478         adap->flags &= ~FW_OK;
5479         notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
5480         for_each_port(adap, i) {
5481                 struct net_device *dev = adap->port[i];
5482
5483                 netif_device_detach(dev);
5484                 netif_carrier_off(dev);
5485         }
5486         if (adap->flags & FULL_INIT_DONE)
5487                 cxgb_down(adap);
5488         rtnl_unlock();
5489         pci_disable_device(pdev);
5490 out:    return state == pci_channel_io_perm_failure ?
5491                 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
5492 }
5493
5494 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
5495 {
5496         int i, ret;
5497         struct fw_caps_config_cmd c;
5498         struct adapter *adap = pci_get_drvdata(pdev);
5499
5500         if (!adap) {
5501                 pci_restore_state(pdev);
5502                 pci_save_state(pdev);
5503                 return PCI_ERS_RESULT_RECOVERED;
5504         }
5505
5506         if (pci_enable_device(pdev)) {
5507                 dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
5508                 return PCI_ERS_RESULT_DISCONNECT;
5509         }
5510
5511         pci_set_master(pdev);
5512         pci_restore_state(pdev);
5513         pci_save_state(pdev);
5514         pci_cleanup_aer_uncorrect_error_status(pdev);
5515
5516         if (t4_wait_dev_ready(adap) < 0)
5517                 return PCI_ERS_RESULT_DISCONNECT;
5518         if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
5519                 return PCI_ERS_RESULT_DISCONNECT;
5520         adap->flags |= FW_OK;
5521         if (adap_init1(adap, &c))
5522                 return PCI_ERS_RESULT_DISCONNECT;
5523
5524         for_each_port(adap, i) {
5525                 struct port_info *p = adap2pinfo(adap, i);
5526
5527                 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
5528                                   NULL, NULL);
5529                 if (ret < 0)
5530                         return PCI_ERS_RESULT_DISCONNECT;
5531                 p->viid = ret;
5532                 p->xact_addr_filt = -1;
5533         }
5534
5535         t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
5536                      adap->params.b_wnd);
5537         setup_memwin(adap);
5538         if (cxgb_up(adap))
5539                 return PCI_ERS_RESULT_DISCONNECT;
5540         return PCI_ERS_RESULT_RECOVERED;
5541 }
5542
5543 static void eeh_resume(struct pci_dev *pdev)
5544 {
5545         int i;
5546         struct adapter *adap = pci_get_drvdata(pdev);
5547
5548         if (!adap)
5549                 return;
5550
5551         rtnl_lock();
5552         for_each_port(adap, i) {
5553                 struct net_device *dev = adap->port[i];
5554
5555                 if (netif_running(dev)) {
5556                         link_start(dev);
5557                         cxgb_set_rxmode(dev);
5558                 }
5559                 netif_device_attach(dev);
5560         }
5561         rtnl_unlock();
5562 }
5563
5564 static const struct pci_error_handlers cxgb4_eeh = {
5565         .error_detected = eeh_err_detected,
5566         .slot_reset     = eeh_slot_reset,
5567         .resume         = eeh_resume,
5568 };
5569
5570 static inline bool is_10g_port(const struct link_config *lc)
5571 {
5572         return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
5573 }
5574
5575 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
5576                              unsigned int size, unsigned int iqe_size)
5577 {
5578         q->intr_params = QINTR_TIMER_IDX(timer_idx) |
5579                          (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
5580         q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
5581         q->iqe_len = iqe_size;
5582         q->size = size;
5583 }
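
     /*
      * So, for example, init_rspq(&q, 0, 0, 1024, 64) used below asks for a
      * 1024-entry queue of 64-byte entries with holdoff timer 0 and
      * packet-count thresholding enabled (counter index 0).
      */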
5584
5585 /*
5586  * Perform default configuration of DMA queues depending on the number and type
5587  * of ports we found and the number of available CPUs.  Most settings can be
5588  * modified by the admin prior to actual use.
5589  */
5590 static void cfg_queues(struct adapter *adap)
5591 {
5592         struct sge *s = &adap->sge;
5593         int i, q10g = 0, n10g = 0, qidx = 0;
5594
5595         for_each_port(adap, i)
5596                 n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
5597
5598         /*
5599          * We default to 1 queue per non-10G port and up to # of cores queues
5600          * per 10G port.
5601          */
5602         if (n10g)
5603                 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
5604         if (q10g > netif_get_num_default_rss_queues())
5605                 q10g = netif_get_num_default_rss_queues();
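
             /*
              * Worked example (illustrative): on a 2x10G + 2x1G adapter with
              * MAX_ETH_QSETS = 32 this starts from q10g = (32 - 2) / 2 = 15
              * queue sets per 10G port, then caps it at the default RSS queue
              * count (typically min(8, num_online_cpus())).
              */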
5606
5607         for_each_port(adap, i) {
5608                 struct port_info *pi = adap2pinfo(adap, i);
5609
5610                 pi->first_qset = qidx;
5611                 pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
5612                 qidx += pi->nqsets;
5613         }
5614
5615         s->ethqsets = qidx;
5616         s->max_ethqsets = qidx;   /* MSI-X may lower it later */
5617
5618         if (is_offload(adap)) {
5619                 /*
5620                  * For offload we use 1 queue/channel if all ports are up to 1G,
5621                  * otherwise we divide all available queues amongst the channels
5622                  * capped by the number of available cores.
5623                  */
5624                 if (n10g) {
5625                         i = min_t(int, ARRAY_SIZE(s->ofldrxq),
5626                                   num_online_cpus());
5627                         s->ofldqsets = roundup(i, adap->params.nports);
5628                 } else
5629                         s->ofldqsets = adap->params.nports;
5630                 /* For RDMA one Rx queue per channel suffices */
5631                 s->rdmaqs = adap->params.nports;
5632         }

        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                struct sge_eth_rxq *r = &s->ethrxq[i];

                init_rspq(&r->rspq, 0, 0, 1024, 64);
                r->fl.size = 72;
        }

        for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
                s->ethtxq[i].q.size = 1024;

        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
                s->ctrlq[i].q.size = 512;

        for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
                s->ofldtxq[i].q.size = 1024;

        for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
                struct sge_ofld_rxq *r = &s->ofldrxq[i];

                init_rspq(&r->rspq, 0, 0, 1024, 64);
                r->rspq.uld = CXGB4_ULD_ISCSI;
                r->fl.size = 72;
        }

        for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
                struct sge_ofld_rxq *r = &s->rdmarxq[i];

                init_rspq(&r->rspq, 0, 0, 511, 64);
                r->rspq.uld = CXGB4_ULD_RDMA;
                r->fl.size = 72;
        }

        init_rspq(&s->fw_evtq, 6, 0, 512, 64);
        init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n,
 * dropping queues round-robin, one per multi-queue port at a time, so the
 * reduction stays even across ports.  The caller guarantees that n allows
 * at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
        int i;
        struct port_info *pi;

        while (n < adap->sge.ethqsets)
                for_each_port(adap, i) {
                        pi = adap2pinfo(adap, i);
                        if (pi->nqsets > 1) {
                                pi->nqsets--;
                                adap->sge.ethqsets--;
                                if (adap->sge.ethqsets <= n)
                                        break;
                        }
                }

        n = 0;
        for_each_port(adap, i) {
                pi = adap2pinfo(adap, i);
                pi->first_qset = n;
                n += pi->nqsets;
        }
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

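/*
 * Try to switch the adapter to MSI-X.  We ask for the ideal vector count
 * ("want") and settle for whatever the system offers, as long as it covers
 * the bare minimum ("need") of one vector per port plus the extra and
 * offload vectors.
 */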
static int enable_msix(struct adapter *adap)
{
        int ofld_need = 0;
        int i, err, want, need;
        struct sge *s = &adap->sge;
        unsigned int nchan = adap->params.nports;
        struct msix_entry entries[MAX_INGQ + 1];

        for (i = 0; i < ARRAY_SIZE(entries); ++i)
                entries[i].entry = i;

        want = s->max_ethqsets + EXTRA_VECS;
        if (is_offload(adap)) {
                want += s->rdmaqs + s->ofldqsets;
                /* need nchan for each possible ULD */
                ofld_need = 2 * nchan;
        }
        need = adap->params.nports + EXTRA_VECS + ofld_need;

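        /*
         * pci_enable_msix() returns 0 on success, a negative errno on
         * failure, or, when it cannot allocate "want" vectors, the number of
         * vectors actually available; keep retrying with that smaller count
         * while it still satisfies our minimum.
         */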
        while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
                want = err;

        if (!err) {
                /*
                 * Distribute available vectors to the various queue groups.
                 * Every group gets its minimum requirement and NIC gets top
                 * priority for leftovers.
                 */
                i = want - EXTRA_VECS - ofld_need;
                if (i < s->max_ethqsets) {
                        s->max_ethqsets = i;
                        if (i < s->ethqsets)
                                reduce_ethqs(adap, i);
                }
                if (is_offload(adap)) {
                        i = want - EXTRA_VECS - s->max_ethqsets;
                        i -= ofld_need - nchan;
                        s->ofldqsets = (i / nchan) * nchan;  /* round down */
                }
                for (i = 0; i < want; ++i)
                        adap->msix_info[i].vec = entries[i].vector;
        } else if (err > 0)
                dev_info(adap->pdev_dev,
                         "only %d MSI-X vectors left, not using MSI-X\n", err);
        return err;
}

#undef EXTRA_VECS

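/*
 * Allocate each port's RSS indirection table and fill it with the default
 * ethtool spreading over that port's Rx queue sets.
 */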
static int init_rss(struct adapter *adap)
{
        unsigned int i, j;

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
                if (!pi->rss)
                        return -ENOMEM;
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
        }
        return 0;
}

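/*
 * Log a one-line summary of a port: supported link speeds, media type,
 * PCIe width and speed, interrupt mode, and the adapter's VPD identity.
 */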
static void print_port_info(const struct net_device *dev)
{
        static const char *base[] = {
                "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
                "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
        };

        char buf[80];
        char *bufp = buf;
        const char *spd = "";
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;

        if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
                spd = " 2.5 GT/s";
        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
                spd = " 5 GT/s";

        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
                bufp += sprintf(bufp, "100/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
                bufp += sprintf(bufp, "1000/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
        if (bufp != buf)
                --bufp;
        sprintf(bufp, "BASE-%s", base[pi->port_type]);

        netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
                    adap->params.vpd.id,
                    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
                    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
                    (adap->flags & USING_MSIX) ? " MSI-X" :
                    (adap->flags & USING_MSI) ? " MSI" : "");
        netdev_info(dev, "S/N: %s, E/C: %s\n",
                    adap->params.vpd.sn, adap->params.vpd.ec);
}

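/*
 * Set the Enable Relaxed Ordering bit in the PCIe Device Control register so
 * the device may issue relaxed-ordered DMA transactions.
 */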
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
        unsigned int i;

        t4_free_mem(adapter->l2t);
        t4_free_mem(adapter->tids.tid_tab);
        disable_msi(adapter);

        for_each_port(adapter, i)
                if (adapter->port[i]) {
                        kfree(adap2pinfo(adapter, i)->rss);
                        free_netdev(adapter->port[i]);
                }
        if (adapter->flags & FW_OK)
                t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

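/*
 * PCI probe routine.  A single "master" physical function, selected by the
 * driver_data of the matching PCI ID table entry, brings up the whole
 * adapter; any other PF skips straight to the SR-IOV provisioning at the
 * end.
 */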
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int func, i, err, s_qpp, qpp, num_seg;
        struct port_info *pi;
        bool highdma = false;
        struct adapter *adapter = NULL;

        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                return err;
        }

        /* We control everything through one PF */
        func = PCI_FUNC(pdev->devfn);
        if (func != ent->driver_data) {
                pci_save_state(pdev);        /* to restore SR-IOV later */
                goto sriov;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out_release_regions;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
                        goto out_disable_device;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto out_disable_device;
                }
        }

        pci_enable_pcie_error_reporting(pdev);
        enable_pcie_relaxed_ordering(pdev);
        pci_set_master(pdev);
        pci_save_state(pdev);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_disable_device;
        }

        adapter->regs = pci_ioremap_bar(pdev, 0);
        if (!adapter->regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_free_adapter;
        }

        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
        adapter->fn = func;
        adapter->msg_enable = dflt_msg_enable;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);

        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
        INIT_WORK(&adapter->db_full_task, process_db_full);
        INIT_WORK(&adapter->db_drop_task, process_db_drop);

        err = t4_prep_adapter(adapter);
        if (err)
                goto out_unmap_bar0;

        if (!is_t4(adapter->params.chip)) {
                s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
                qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
                      SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
                num_seg = PAGE_SIZE / SEGMENT_SIZE;

                /* Each write-coalescing segment is 128 bytes.  Write
                 * coalescing can be used only when the number of egress
                 * queues sharing a page (the SGE_EGRESS_QUEUES_PER_PAGE_PF
                 * value for this PF) does not exceed the number of such
                 * segments that fit in a page.
                 */
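                /*
                 * For example, with 4KB pages num_seg is 4096 / 128 = 32,
                 * so at most 32 egress queues may share a page.
                 */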
                if (qpp > num_seg) {
                        dev_err(&pdev->dev,
                                "Incorrect number of egress queues per page\n");
                        err = -EINVAL;
                        goto out_unmap_bar0;
                }
                adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
                                           pci_resource_len(pdev, 2));
                if (!adapter->bar2) {
                        dev_err(&pdev->dev, "cannot map device bar2 region\n");
                        err = -ENOMEM;
                        goto out_unmap_bar0;
                }
        }

        setup_memwin(adapter);
        err = adap_init0(adapter);
        setup_memwin_rdma(adapter);
        if (err)
                goto out_unmap_bar;

        for_each_port(adapter, i) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mq(sizeof(struct port_info),
                                           MAX_ETH_QSETS);
                if (!netdev) {
                        err = -ENOMEM;
                        goto out_free_dev;
                }

                SET_NETDEV_DEV(netdev, &pdev->dev);

                adapter->port[i] = netdev;
                pi = netdev_priv(netdev);
                pi->adapter = adapter;
                pi->xact_addr_filt = -1;
                pi->port_id = i;
                netdev->irq = pdev->irq;

                netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
                        NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_RXHASH |
                        NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
                if (highdma)
                        netdev->hw_features |= NETIF_F_HIGHDMA;
                netdev->features |= netdev->hw_features;
                netdev->vlan_features = netdev->features & VLAN_FEAT;

                netdev->priv_flags |= IFF_UNICAST_FLT;

                netdev->netdev_ops = &cxgb4_netdev_ops;
                SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
        }

        pci_set_drvdata(pdev, adapter);

        if (adapter->flags & FW_OK) {
                err = t4_port_init(adapter, func, func, 0);
                if (err)
                        goto out_free_dev;
        }

        /*
         * Configure queues and allocate tables now, as they may be needed
         * as soon as the first register_netdev completes.
         */
        cfg_queues(adapter);

        adapter->l2t = t4_init_l2t();
        if (!adapter->l2t) {
                /* We tolerate a lack of L2T, giving up some functionality */
                dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
                adapter->params.offload = 0;
        }

        if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
                dev_warn(&pdev->dev, "could not allocate TID table, "
                         "continuing\n");
                adapter->params.offload = 0;
        }

        /* See what interrupts we'll be using */
        if (msi > 1 && enable_msix(adapter) == 0)
                adapter->flags |= USING_MSIX;
        else if (msi > 0 && pci_enable_msi(pdev) == 0)
                adapter->flags |= USING_MSI;

        err = init_rss(adapter);
        if (err)
                goto out_free_dev;

        /*
         * The card is now ready to go.  If any errors occur during device
         * registration we do not fail the whole card but rather proceed only
         * with the ports we manage to register successfully.  However we must
         * register at least one net device.
         */
        for_each_port(adapter, i) {
                pi = adap2pinfo(adapter, i);
                netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
                netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

                err = register_netdev(adapter->port[i]);
                if (err)
                        break;
                adapter->chan_map[pi->tx_chan] = i;
                print_port_info(adapter->port[i]);
        }
        if (i == 0) {
                dev_err(&pdev->dev, "could not register any net devices\n");
                goto out_free_dev;
        }
        if (err) {
                dev_warn(&pdev->dev, "only %d net devices registered\n", i);
                err = 0;
        }

        if (cxgb4_debugfs_root) {
                adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
                                                           cxgb4_debugfs_root);
                setup_debugfs(adapter);
        }

        /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
        pdev->needs_freset = 1;

        if (is_offload(adapter))
                attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
        if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
                if (pci_enable_sriov(pdev, num_vf[func]) == 0)
                        dev_info(&pdev->dev,
                                 "instantiated %u virtual functions\n",
                                 num_vf[func]);
#endif
        return 0;

 out_free_dev:
        free_some_resources(adapter);
 out_unmap_bar:
        if (!is_t4(adapter->params.chip))
                iounmap(adapter->bar2);
 out_unmap_bar0:
        iounmap(adapter->regs);
 out_free_adapter:
        kfree(adapter);
 out_disable_device:
        pci_disable_pcie_error_reporting(pdev);
        pci_disable_device(pdev);
 out_release_regions:
        pci_release_regions(pdev);
        return err;
}

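/*
 * PCI remove routine.  Tears down everything init_one() set up: SR-IOV VFs,
 * ULDs, net devices, filters, queues, BAR mappings and PCI state.
 */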
static void remove_one(struct pci_dev *pdev)
{
        struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
        pci_disable_sriov(pdev);
#endif

        if (adapter) {
                int i;

                if (is_offload(adapter))
                        detach_ulds(adapter);

                for_each_port(adapter, i)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);

                if (adapter->debugfs_root)
                        debugfs_remove_recursive(adapter->debugfs_root);

                /* If we allocated filters, free up state associated with any
                 * valid filters ...
                 */
                if (adapter->tids.ftid_tab) {
                        struct filter_entry *f = &adapter->tids.ftid_tab[0];
                        for (i = 0; i < (adapter->tids.nftids +
                                        adapter->tids.nsftids); i++, f++)
                                if (f->valid)
                                        clear_filter(adapter, f);
                }

                if (adapter->flags & FULL_INIT_DONE)
                        cxgb_down(adapter);

                free_some_resources(adapter);
                iounmap(adapter->regs);
                if (!is_t4(adapter->params.chip))
                        iounmap(adapter->bar2);
                kfree(adapter);
                pci_disable_pcie_error_reporting(pdev);
                pci_disable_device(pdev);
                pci_release_regions(pdev);
        } else
                pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
        .name     = KBUILD_MODNAME,
        .id_table = cxgb4_pci_tbl,
        .probe    = init_one,
        .remove   = remove_one,
        .err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
        int ret;

        workq = create_singlethread_workqueue("cxgb4");
        if (!workq)
                return -ENOMEM;

        /* Debugfs support is optional, just warn if this fails */
        cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
        if (!cxgb4_debugfs_root)
                pr_warn("could not create debugfs entry, continuing\n");

        ret = pci_register_driver(&cxgb4_driver);
        if (ret < 0) {
                debugfs_remove(cxgb4_debugfs_root);
                destroy_workqueue(workq);
                return ret;     /* don't register the notifier on failure */
        }

        register_inet6addr_notifier(&cxgb4_inet6addr_notifier);

        return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
        unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
        pci_unregister_driver(&cxgb4_driver);
        debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
        flush_workqueue(workq);
        destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);