/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2013 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
28 /* ethtool support for ixgbe */
30 #include <linux/interrupt.h>
31 #include <linux/types.h>
32 #include <linux/module.h>
33 #include <linux/slab.h>
34 #include <linux/pci.h>
35 #include <linux/netdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/vmalloc.h>
38 #include <linux/highmem.h>
39 #include <linux/uaccess.h>
42 #include "ixgbe_phy.h"
45 #define IXGBE_ALL_RAR_ENTRIES 16
47 enum {NETDEV_STATS, IXGBE_STATS};
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
56 #define IXGBE_STAT(m) IXGBE_STATS, \
57 sizeof(((struct ixgbe_adapter *)0)->m), \
58 offsetof(struct ixgbe_adapter, m)
59 #define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
60 sizeof(((struct rtnl_link_stats64 *)0)->m), \
61 offsetof(struct rtnl_link_stats64, m)
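/* Each entry below pulls its value either from the netdev's rtnl_link_stats64
 * (NETDEV_STATS) or directly from the ixgbe_adapter structure (IXGBE_STATS);
 * the size and offset recorded here let ixgbe_get_ethtool_stats() read the
 * counter generically at runtime.
 */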
63 static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
64 {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
65 {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
66 {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
67 {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
68 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
69 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
70 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
71 {"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
72 {"lsc_int", IXGBE_STAT(lsc_int)},
73 {"tx_busy", IXGBE_STAT(tx_busy)},
74 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
75 {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
76 {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
77 {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
78 {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
79 {"multicast", IXGBE_NETDEV_STAT(multicast)},
80 {"broadcast", IXGBE_STAT(stats.bprc)},
81 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
82 {"collisions", IXGBE_NETDEV_STAT(collisions)},
83 {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
84 {"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
85 {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
86 {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
87 {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
88 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
89 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
90 {"fdir_overflow", IXGBE_STAT(fdir_overflow)},
91 {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
92 {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
93 {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
94 {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
95 {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
96 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
97 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
98 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
99 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
100 {"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
101 {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
102 {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
103 {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
104 {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
105 {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
106 {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
107 {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
108 {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
109 {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
110 {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
111 {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
112 {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
114 {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
115 {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
116 {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
117 {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
118 {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
119 {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
120 {"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
121 {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
122 #endif /* IXGBE_FCOE */
125 /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
126 * we set the num_rx_queues to evaluate to num_tx_queues. This is
127 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
130 #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
132 #define IXGBE_QUEUE_STATS_LEN ( \
133 (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
134 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
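/* sizeof(struct ixgbe_queue_stats) / sizeof(u64) is the number of u64
 * counters reported per queue (packets and bytes), counted once for every
 * Tx and every Rx queue.
 */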
135 #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
136 #define IXGBE_PB_STATS_LEN ( \
137 (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
138 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
139 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
	 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
	/ sizeof(u64))
142 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
143 IXGBE_PB_STATS_LEN + \
144 IXGBE_QUEUE_STATS_LEN)
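/* ethtool statistics are laid out as: the global strings above, then the
 * per-queue Tx/Rx packet and byte counters, then the per-packet-buffer flow
 * control counters; ixgbe_get_strings() and ixgbe_get_ethtool_stats() must
 * fill them in the same order.
 */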
146 static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
147 "Register test (offline)", "Eeprom test (offline)",
148 "Interrupt test (offline)", "Loopback test (offline)",
149 "Link test (on/offline)"
151 #define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
153 static int ixgbe_get_settings(struct net_device *netdev,
154 struct ethtool_cmd *ecmd)
156 struct ixgbe_adapter *adapter = netdev_priv(netdev);
157 struct ixgbe_hw *hw = &adapter->hw;
158 ixgbe_link_speed supported_link;
	u32 link_speed = 0;
	bool autoneg = false;
	bool link_up;
163 hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
165 /* set the supported link speeds */
166 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
167 ecmd->supported |= SUPPORTED_10000baseT_Full;
168 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
169 ecmd->supported |= SUPPORTED_1000baseT_Full;
170 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
171 ecmd->supported |= SUPPORTED_100baseT_Full;
173 /* set the advertised speeds */
174 if (hw->phy.autoneg_advertised) {
175 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
176 ecmd->advertising |= ADVERTISED_100baseT_Full;
177 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
178 ecmd->advertising |= ADVERTISED_10000baseT_Full;
179 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
180 ecmd->advertising |= ADVERTISED_1000baseT_Full;
182 /* default modes in case phy.autoneg_advertised isn't set */
183 if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
184 ecmd->advertising |= ADVERTISED_10000baseT_Full;
185 if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
186 ecmd->advertising |= ADVERTISED_1000baseT_Full;
187 if (supported_link & IXGBE_LINK_SPEED_100_FULL)
188 ecmd->advertising |= ADVERTISED_100baseT_Full;
192 ecmd->supported |= SUPPORTED_Autoneg;
193 ecmd->advertising |= ADVERTISED_Autoneg;
194 ecmd->autoneg = AUTONEG_ENABLE;
196 ecmd->autoneg = AUTONEG_DISABLE;
198 ecmd->transceiver = XCVR_EXTERNAL;
200 /* Determine the remaining settings based on the PHY type. */
201 switch (adapter->hw.phy.type) {
204 case ixgbe_phy_cu_unknown:
205 ecmd->supported |= SUPPORTED_TP;
206 ecmd->advertising |= ADVERTISED_TP;
207 ecmd->port = PORT_TP;
210 ecmd->supported |= SUPPORTED_FIBRE;
211 ecmd->advertising |= ADVERTISED_FIBRE;
212 ecmd->port = PORT_FIBRE;
215 case ixgbe_phy_sfp_passive_tyco:
216 case ixgbe_phy_sfp_passive_unknown:
217 case ixgbe_phy_sfp_ftl:
218 case ixgbe_phy_sfp_avago:
219 case ixgbe_phy_sfp_intel:
220 case ixgbe_phy_sfp_unknown:
221 /* SFP+ devices, further checking needed */
222 switch (adapter->hw.phy.sfp_type) {
223 case ixgbe_sfp_type_da_cu:
224 case ixgbe_sfp_type_da_cu_core0:
225 case ixgbe_sfp_type_da_cu_core1:
226 ecmd->supported |= SUPPORTED_FIBRE;
227 ecmd->advertising |= ADVERTISED_FIBRE;
228 ecmd->port = PORT_DA;
230 case ixgbe_sfp_type_sr:
231 case ixgbe_sfp_type_lr:
232 case ixgbe_sfp_type_srlr_core0:
233 case ixgbe_sfp_type_srlr_core1:
234 ecmd->supported |= SUPPORTED_FIBRE;
235 ecmd->advertising |= ADVERTISED_FIBRE;
236 ecmd->port = PORT_FIBRE;
238 case ixgbe_sfp_type_not_present:
239 ecmd->supported |= SUPPORTED_FIBRE;
240 ecmd->advertising |= ADVERTISED_FIBRE;
241 ecmd->port = PORT_NONE;
243 case ixgbe_sfp_type_1g_cu_core0:
244 case ixgbe_sfp_type_1g_cu_core1:
245 ecmd->supported |= SUPPORTED_TP;
246 ecmd->advertising |= ADVERTISED_TP;
247 ecmd->port = PORT_TP;
249 case ixgbe_sfp_type_1g_sx_core0:
250 case ixgbe_sfp_type_1g_sx_core1:
251 ecmd->supported |= SUPPORTED_FIBRE;
252 ecmd->advertising |= ADVERTISED_FIBRE;
253 ecmd->port = PORT_FIBRE;
255 case ixgbe_sfp_type_unknown:
257 ecmd->supported |= SUPPORTED_FIBRE;
258 ecmd->advertising |= ADVERTISED_FIBRE;
259 ecmd->port = PORT_OTHER;
264 ecmd->supported |= SUPPORTED_FIBRE;
265 ecmd->advertising |= ADVERTISED_FIBRE;
266 ecmd->port = PORT_NONE;
268 case ixgbe_phy_unknown:
269 case ixgbe_phy_generic:
270 case ixgbe_phy_sfp_unsupported:
272 ecmd->supported |= SUPPORTED_FIBRE;
273 ecmd->advertising |= ADVERTISED_FIBRE;
274 ecmd->port = PORT_OTHER;
278 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
280 switch (link_speed) {
281 case IXGBE_LINK_SPEED_10GB_FULL:
282 ethtool_cmd_speed_set(ecmd, SPEED_10000);
284 case IXGBE_LINK_SPEED_1GB_FULL:
285 ethtool_cmd_speed_set(ecmd, SPEED_1000);
287 case IXGBE_LINK_SPEED_100_FULL:
288 ethtool_cmd_speed_set(ecmd, SPEED_100);
293 ecmd->duplex = DUPLEX_FULL;
295 ethtool_cmd_speed_set(ecmd, -1);
302 static int ixgbe_set_settings(struct net_device *netdev,
303 struct ethtool_cmd *ecmd)
305 struct ixgbe_adapter *adapter = netdev_priv(netdev);
306 struct ixgbe_hw *hw = &adapter->hw;
310 if ((hw->phy.media_type == ixgbe_media_type_copper) ||
311 (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
316 if (ecmd->autoneg == AUTONEG_DISABLE)
319 if (ecmd->advertising & ~ecmd->supported)
322 old = hw->phy.autoneg_advertised;
324 if (ecmd->advertising & ADVERTISED_10000baseT_Full)
325 advertised |= IXGBE_LINK_SPEED_10GB_FULL;
327 if (ecmd->advertising & ADVERTISED_1000baseT_Full)
328 advertised |= IXGBE_LINK_SPEED_1GB_FULL;
330 if (ecmd->advertising & ADVERTISED_100baseT_Full)
331 advertised |= IXGBE_LINK_SPEED_100_FULL;
333 if (old == advertised)
335 /* this sets the link speed and restarts auto-neg */
336 hw->mac.autotry_restart = true;
337 err = hw->mac.ops.setup_link(hw, advertised, true);
339 e_info(probe, "setup link failed with code %d\n", err);
340 hw->mac.ops.setup_link(hw, old, true);
343 /* in this case we currently only support 10Gb/FULL */
344 u32 speed = ethtool_cmd_speed(ecmd);
345 if ((ecmd->autoneg == AUTONEG_ENABLE) ||
346 (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
347 (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
354 static void ixgbe_get_pauseparam(struct net_device *netdev,
355 struct ethtool_pauseparam *pause)
357 struct ixgbe_adapter *adapter = netdev_priv(netdev);
358 struct ixgbe_hw *hw = &adapter->hw;
360 if (hw->fc.disable_fc_autoneg)
365 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
367 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
369 } else if (hw->fc.current_mode == ixgbe_fc_full) {
375 static int ixgbe_set_pauseparam(struct net_device *netdev,
376 struct ethtool_pauseparam *pause)
378 struct ixgbe_adapter *adapter = netdev_priv(netdev);
379 struct ixgbe_hw *hw = &adapter->hw;
380 struct ixgbe_fc_info fc = hw->fc;
	/* 82598 does not support link flow control with DCB enabled */
383 if ((hw->mac.type == ixgbe_mac_82598EB) &&
384 (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
387 /* some devices do not support autoneg of link flow control */
388 if ((pause->autoneg == AUTONEG_ENABLE) &&
389 (ixgbe_device_supports_autoneg_fc(hw) != 0))
392 fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
394 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
395 fc.requested_mode = ixgbe_fc_full;
396 else if (pause->rx_pause && !pause->tx_pause)
397 fc.requested_mode = ixgbe_fc_rx_pause;
398 else if (!pause->rx_pause && pause->tx_pause)
399 fc.requested_mode = ixgbe_fc_tx_pause;
401 fc.requested_mode = ixgbe_fc_none;
	/* if anything changed, update the hardware and apply the new autoneg setting */
404 if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
406 if (netif_running(netdev))
407 ixgbe_reinit_locked(adapter);
409 ixgbe_reset(adapter);
415 static u32 ixgbe_get_msglevel(struct net_device *netdev)
417 struct ixgbe_adapter *adapter = netdev_priv(netdev);
418 return adapter->msg_enable;
421 static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
423 struct ixgbe_adapter *adapter = netdev_priv(netdev);
424 adapter->msg_enable = data;
427 static int ixgbe_get_regs_len(struct net_device *netdev)
429 #define IXGBE_REGS_LEN 1129
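	/* 1129 32-bit words: large enough for the highest index written by
	 * ixgbe_get_regs() below (regs_buff[1128]) */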
430 return IXGBE_REGS_LEN * sizeof(u32);
433 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
435 static void ixgbe_get_regs(struct net_device *netdev,
436 struct ethtool_regs *regs, void *p)
438 struct ixgbe_adapter *adapter = netdev_priv(netdev);
439 struct ixgbe_hw *hw = &adapter->hw;
443 memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));
445 regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
447 /* General Registers */
448 regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
449 regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
450 regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
451 regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
452 regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
453 regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
454 regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
455 regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
458 regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
459 regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
460 regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
461 regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
462 regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
463 regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
464 regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
465 regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
466 regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
467 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
470 /* don't read EICR because it can clear interrupt causes, instead
471 * read EICS which is a shadow but doesn't clear EICR */
472 regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
473 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
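	/* entry 18 stands in for EICR; both entries are filled from EICS so
	 * the register dump itself never clears pending interrupt causes */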
474 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
475 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
476 regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
477 regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
478 regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
479 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
480 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
481 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
482 regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
483 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
486 regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
487 regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
488 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
489 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
490 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
491 for (i = 0; i < 8; i++) {
492 switch (hw->mac.type) {
493 case ixgbe_mac_82598EB:
494 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
495 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
497 case ixgbe_mac_82599EB:
499 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
500 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
506 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
507 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
510 for (i = 0; i < 64; i++)
511 regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
512 for (i = 0; i < 64; i++)
513 regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
514 for (i = 0; i < 64; i++)
515 regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
516 for (i = 0; i < 64; i++)
517 regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
518 for (i = 0; i < 64; i++)
519 regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
520 for (i = 0; i < 64; i++)
521 regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
522 for (i = 0; i < 16; i++)
523 regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
524 for (i = 0; i < 16; i++)
525 regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
526 regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
527 for (i = 0; i < 8; i++)
528 regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
529 regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
530 regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
533 regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
534 regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
535 for (i = 0; i < 16; i++)
536 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
537 for (i = 0; i < 16; i++)
538 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
539 regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
540 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
541 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
542 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
543 regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
544 regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
545 for (i = 0; i < 8; i++)
546 regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
547 for (i = 0; i < 8; i++)
548 regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
549 regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
552 for (i = 0; i < 32; i++)
553 regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
554 for (i = 0; i < 32; i++)
555 regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
556 for (i = 0; i < 32; i++)
557 regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
558 for (i = 0; i < 32; i++)
559 regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
560 for (i = 0; i < 32; i++)
561 regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
562 for (i = 0; i < 32; i++)
563 regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
564 for (i = 0; i < 32; i++)
565 regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
566 for (i = 0; i < 32; i++)
567 regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
568 regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
569 for (i = 0; i < 16; i++)
570 regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
571 regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
572 for (i = 0; i < 8; i++)
573 regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
574 regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
577 regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
578 regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
579 regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
580 regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
581 regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
582 regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
583 regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
584 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
585 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
588 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
589 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
590 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
591 regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
592 for (i = 0; i < 8; i++)
593 regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
594 for (i = 0; i < 8; i++)
595 regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
596 for (i = 0; i < 8; i++)
597 regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
598 for (i = 0; i < 8; i++)
599 regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
600 for (i = 0; i < 8; i++)
601 regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
602 for (i = 0; i < 8; i++)
603 regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
606 regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
607 regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
608 regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
609 regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
610 for (i = 0; i < 8; i++)
611 regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
612 regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
613 regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
614 regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
615 regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
616 regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
617 regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
618 regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
619 for (i = 0; i < 8; i++)
620 regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
621 for (i = 0; i < 8; i++)
622 regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
623 for (i = 0; i < 8; i++)
624 regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
625 for (i = 0; i < 8; i++)
626 regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
627 regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
628 regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
629 regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
630 regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
631 regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
632 regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
633 regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
634 regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
635 regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
636 regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
637 regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
638 regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
639 for (i = 0; i < 8; i++)
640 regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
641 regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
642 regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
643 regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
644 regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
645 regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
646 regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
647 regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
648 regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
649 regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
650 regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
651 regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
652 regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
653 regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
654 regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
655 regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
656 regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
657 regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
658 regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
659 regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
660 for (i = 0; i < 16; i++)
661 regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
662 for (i = 0; i < 16; i++)
663 regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
664 for (i = 0; i < 16; i++)
665 regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
666 for (i = 0; i < 16; i++)
667 regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
670 regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
671 regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
672 regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
673 regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
674 regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
675 regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
676 regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
677 regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
678 regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
679 regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
680 regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
681 regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
682 regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
683 regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
684 regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
685 regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
686 regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
687 regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
688 regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
689 regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
690 regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
691 regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
692 regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
693 regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
694 regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
695 regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
696 regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
697 regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
698 regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
699 regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
700 regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
701 regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
702 regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
705 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
706 for (i = 0; i < 8; i++)
707 regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
708 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
709 for (i = 0; i < 4; i++)
710 regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
711 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
712 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
713 for (i = 0; i < 8; i++)
714 regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
715 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
716 for (i = 0; i < 4; i++)
717 regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
718 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
719 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
720 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
721 regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
722 regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
723 regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
724 regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
725 regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
726 regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
727 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
728 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
729 for (i = 0; i < 8; i++)
730 regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
731 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
732 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
733 regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
734 regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
735 regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
736 regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
737 regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
738 regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
739 regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
741 /* 82599 X540 specific registers */
742 regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);
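/* ethtool reports EEPROM size in bytes; the ixgbe EEPROM is addressed in
 * 16-bit words, hence the word_size * 2 below.
 */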
745 static int ixgbe_get_eeprom_len(struct net_device *netdev)
747 struct ixgbe_adapter *adapter = netdev_priv(netdev);
748 return adapter->hw.eeprom.word_size * 2;
751 static int ixgbe_get_eeprom(struct net_device *netdev,
752 struct ethtool_eeprom *eeprom, u8 *bytes)
754 struct ixgbe_adapter *adapter = netdev_priv(netdev);
755 struct ixgbe_hw *hw = &adapter->hw;
757 int first_word, last_word, eeprom_len;
761 if (eeprom->len == 0)
764 eeprom->magic = hw->vendor_id | (hw->device_id << 16);
766 first_word = eeprom->offset >> 1;
767 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
768 eeprom_len = last_word - first_word + 1;
770 eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
774 ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
777 /* Device's eeprom is always little-endian, word addressable */
778 for (i = 0; i < eeprom_len; i++)
779 le16_to_cpus(&eeprom_buff[i]);
781 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
787 static int ixgbe_set_eeprom(struct net_device *netdev,
788 struct ethtool_eeprom *eeprom, u8 *bytes)
790 struct ixgbe_adapter *adapter = netdev_priv(netdev);
791 struct ixgbe_hw *hw = &adapter->hw;
794 int max_len, first_word, last_word, ret_val = 0;
797 if (eeprom->len == 0)
800 if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
803 max_len = hw->eeprom.word_size * 2;
805 first_word = eeprom->offset >> 1;
806 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
807 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
813 if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
818 ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
824 if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
829 ret_val = hw->eeprom.ops.read(hw, last_word,
830 &eeprom_buff[last_word - first_word]);
835 /* Device's eeprom is always little-endian, word addressable */
836 for (i = 0; i < last_word - first_word + 1; i++)
837 le16_to_cpus(&eeprom_buff[i]);
839 memcpy(ptr, bytes, eeprom->len);
841 for (i = 0; i < last_word - first_word + 1; i++)
842 cpu_to_le16s(&eeprom_buff[i]);
844 ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
845 last_word - first_word + 1,
848 /* Update the checksum */
850 hw->eeprom.ops.update_checksum(hw);
857 static void ixgbe_get_drvinfo(struct net_device *netdev,
858 struct ethtool_drvinfo *drvinfo)
860 struct ixgbe_adapter *adapter = netdev_priv(netdev);
863 strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
864 strlcpy(drvinfo->version, ixgbe_driver_version,
865 sizeof(drvinfo->version));
867 nvm_track_id = (adapter->eeprom_verh << 16) |
868 adapter->eeprom_verl;
869 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
872 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
873 sizeof(drvinfo->bus_info));
874 drvinfo->n_stats = IXGBE_STATS_LEN;
875 drvinfo->testinfo_len = IXGBE_TEST_LEN;
876 drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
879 static void ixgbe_get_ringparam(struct net_device *netdev,
880 struct ethtool_ringparam *ring)
882 struct ixgbe_adapter *adapter = netdev_priv(netdev);
883 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
884 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
886 ring->rx_max_pending = IXGBE_MAX_RXD;
887 ring->tx_max_pending = IXGBE_MAX_TXD;
888 ring->rx_pending = rx_ring->count;
889 ring->tx_pending = tx_ring->count;
892 static int ixgbe_set_ringparam(struct net_device *netdev,
893 struct ethtool_ringparam *ring)
895 struct ixgbe_adapter *adapter = netdev_priv(netdev);
896 struct ixgbe_ring *temp_ring;
898 u32 new_rx_count, new_tx_count;
900 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
903 new_tx_count = clamp_t(u32, ring->tx_pending,
904 IXGBE_MIN_TXD, IXGBE_MAX_TXD);
905 new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
907 new_rx_count = clamp_t(u32, ring->rx_pending,
908 IXGBE_MIN_RXD, IXGBE_MAX_RXD);
909 new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
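	/* descriptor counts must stay within the IXGBE_MIN/MAX limits and be a
	 * multiple of the hardware's required descriptor granularity, hence
	 * the clamp and ALIGN above */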
911 if ((new_tx_count == adapter->tx_ring_count) &&
912 (new_rx_count == adapter->rx_ring_count)) {
917 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
918 usleep_range(1000, 2000);
920 if (!netif_running(adapter->netdev)) {
921 for (i = 0; i < adapter->num_tx_queues; i++)
922 adapter->tx_ring[i]->count = new_tx_count;
923 for (i = 0; i < adapter->num_rx_queues; i++)
924 adapter->rx_ring[i]->count = new_rx_count;
925 adapter->tx_ring_count = new_tx_count;
926 adapter->rx_ring_count = new_rx_count;
930 /* allocate temporary buffer to store rings in */
931 i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues);
932 temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
947 if (new_tx_count != adapter->tx_ring_count) {
948 for (i = 0; i < adapter->num_tx_queues; i++) {
949 memcpy(&temp_ring[i], adapter->tx_ring[i],
950 sizeof(struct ixgbe_ring));
952 temp_ring[i].count = new_tx_count;
953 err = ixgbe_setup_tx_resources(&temp_ring[i]);
957 ixgbe_free_tx_resources(&temp_ring[i]);
963 for (i = 0; i < adapter->num_tx_queues; i++) {
964 ixgbe_free_tx_resources(adapter->tx_ring[i]);
966 memcpy(adapter->tx_ring[i], &temp_ring[i],
967 sizeof(struct ixgbe_ring));
970 adapter->tx_ring_count = new_tx_count;
973 /* Repeat the process for the Rx rings if needed */
974 if (new_rx_count != adapter->rx_ring_count) {
975 for (i = 0; i < adapter->num_rx_queues; i++) {
976 memcpy(&temp_ring[i], adapter->rx_ring[i],
977 sizeof(struct ixgbe_ring));
979 temp_ring[i].count = new_rx_count;
980 err = ixgbe_setup_rx_resources(&temp_ring[i]);
984 ixgbe_free_rx_resources(&temp_ring[i]);
991 for (i = 0; i < adapter->num_rx_queues; i++) {
992 ixgbe_free_rx_resources(adapter->rx_ring[i]);
994 memcpy(adapter->rx_ring[i], &temp_ring[i],
995 sizeof(struct ixgbe_ring));
998 adapter->rx_ring_count = new_rx_count;
1005 clear_bit(__IXGBE_RESETTING, &adapter->state);
1009 static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
1013 return IXGBE_TEST_LEN;
1015 return IXGBE_STATS_LEN;
1021 static void ixgbe_get_ethtool_stats(struct net_device *netdev,
1022 struct ethtool_stats *stats, u64 *data)
1024 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1025 struct rtnl_link_stats64 temp;
1026 const struct rtnl_link_stats64 *net_stats;
1028 struct ixgbe_ring *ring;
1032 ixgbe_update_stats(adapter);
1033 net_stats = dev_get_stats(netdev, &temp);
1034 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1035 switch (ixgbe_gstrings_stats[i].type) {
1037 p = (char *) net_stats +
1038 ixgbe_gstrings_stats[i].stat_offset;
1041 p = (char *) adapter +
1042 ixgbe_gstrings_stats[i].stat_offset;
1049 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
1050 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1052 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1053 ring = adapter->tx_ring[j];
1062 start = u64_stats_fetch_begin_bh(&ring->syncp);
1063 data[i] = ring->stats.packets;
1064 data[i+1] = ring->stats.bytes;
1065 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1068 for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
1069 ring = adapter->rx_ring[j];
1078 start = u64_stats_fetch_begin_bh(&ring->syncp);
1079 data[i] = ring->stats.packets;
1080 data[i+1] = ring->stats.bytes;
1081 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
1085 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1086 data[i++] = adapter->stats.pxontxc[j];
1087 data[i++] = adapter->stats.pxofftxc[j];
1089 for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
1090 data[i++] = adapter->stats.pxonrxc[j];
1091 data[i++] = adapter->stats.pxoffrxc[j];
1095 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
1098 char *p = (char *)data;
1101 switch (stringset) {
1103 for (i = 0; i < IXGBE_TEST_LEN; i++) {
1104 memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
1105 data += ETH_GSTRING_LEN;
1109 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
1110 memcpy(p, ixgbe_gstrings_stats[i].stat_string,
1112 p += ETH_GSTRING_LEN;
1114 for (i = 0; i < netdev->num_tx_queues; i++) {
1115 sprintf(p, "tx_queue_%u_packets", i);
1116 p += ETH_GSTRING_LEN;
1117 sprintf(p, "tx_queue_%u_bytes", i);
1118 p += ETH_GSTRING_LEN;
1120 for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
1121 sprintf(p, "rx_queue_%u_packets", i);
1122 p += ETH_GSTRING_LEN;
1123 sprintf(p, "rx_queue_%u_bytes", i);
1124 p += ETH_GSTRING_LEN;
1126 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1127 sprintf(p, "tx_pb_%u_pxon", i);
1128 p += ETH_GSTRING_LEN;
1129 sprintf(p, "tx_pb_%u_pxoff", i);
1130 p += ETH_GSTRING_LEN;
1132 for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
1133 sprintf(p, "rx_pb_%u_pxon", i);
1134 p += ETH_GSTRING_LEN;
1135 sprintf(p, "rx_pb_%u_pxoff", i);
1136 p += ETH_GSTRING_LEN;
1138 /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
1143 static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
1145 struct ixgbe_hw *hw = &adapter->hw;
1150 hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
1158 /* ethtool register test data */
1159 struct ixgbe_reg_test {
1167 /* In the hardware, registers are laid out either singly, in arrays
1168 * spaced 0x40 bytes apart, or in contiguous tables. We assume
1169 * most tests take place on arrays or single registers (handled
1170 * as a single-element array) and special-case the tables.
1171 * Table tests are always pattern tests.
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
1177 #define PATTERN_TEST 1
1178 #define SET_READ_TEST 2
1179 #define WRITE_NO_TEST 3
1180 #define TABLE32_TEST 4
1181 #define TABLE64_TEST_LO 5
1182 #define TABLE64_TEST_HI 6
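/* PATTERN_TEST walks a set of bit patterns through the register and checks
 * the masked read-back; SET_READ_TEST writes a single value and verifies it;
 * WRITE_NO_TEST performs setup writes with no verification; the TABLE*
 * variants apply the pattern test across contiguous register tables.
 */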
1184 /* default 82599 register test */
1185 static const struct ixgbe_reg_test reg_test_82599[] = {
1186 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1187 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1188 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1189 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1190 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
1191 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1192 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1193 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1194 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1195 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1196 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1197 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1198 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1199 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1200 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
1201 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
1202 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1203 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
1204 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1208 /* default 82598 register test */
1209 static const struct ixgbe_reg_test reg_test_82598[] = {
1210 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1211 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1212 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1213 { IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
1214 { IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1215 { IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1216 { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1217 /* Enable all four RX queues before testing. */
1218 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
1219 /* RDH is read-only for 82598, only test RDT. */
1220 { IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
1221 { IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
1222 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1223 { IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1224 { IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
1225 { IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
1226 { IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1227 { IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
1228 { IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
1229 { IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
1230 { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
1231 { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
1232 { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
1236 static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
1237 u32 mask, u32 write)
1239 u32 pat, val, before;
1240 static const u32 test_pattern[] = {
1241 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
1243 for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
1244 before = readl(adapter->hw.hw_addr + reg);
1245 writel((test_pattern[pat] & write),
1246 (adapter->hw.hw_addr + reg));
1247 val = readl(adapter->hw.hw_addr + reg);
1248 if (val != (test_pattern[pat] & write & mask)) {
1249 e_err(drv, "pattern test reg %04X failed: got "
1250 "0x%08X expected 0x%08X\n",
1251 reg, val, (test_pattern[pat] & write & mask));
1253 writel(before, adapter->hw.hw_addr + reg);
1256 writel(before, adapter->hw.hw_addr + reg);
1261 static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
1262 u32 mask, u32 write)
1265 before = readl(adapter->hw.hw_addr + reg);
1266 writel((write & mask), (adapter->hw.hw_addr + reg));
1267 val = readl(adapter->hw.hw_addr + reg);
1268 if ((write & mask) != (val & mask)) {
1269 e_err(drv, "set/check reg %04X test failed: got 0x%08X "
1270 "expected 0x%08X\n", reg, (val & mask), (write & mask));
1272 writel(before, (adapter->hw.hw_addr + reg));
1275 writel(before, (adapter->hw.hw_addr + reg));
#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, reg, mask, write)) \
			return 1; \
	} while (0)
1292 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1294 const struct ixgbe_reg_test *test;
1295 u32 value, before, after;
1298 switch (adapter->hw.mac.type) {
1299 case ixgbe_mac_82598EB:
1300 toggle = 0x7FFFF3FF;
1301 test = reg_test_82598;
1303 case ixgbe_mac_82599EB:
1304 case ixgbe_mac_X540:
1305 toggle = 0x7FFFF30F;
1306 test = reg_test_82599;
	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
1320 before = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS);
1321 value = (IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle);
1322 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, toggle);
1323 after = IXGBE_READ_REG(&adapter->hw, IXGBE_STATUS) & toggle;
1324 if (value != after) {
1325 e_err(drv, "failed STATUS register test got: 0x%08X "
1326 "expected: 0x%08X\n", after, value);
1330 /* restore previous status */
1331 IXGBE_WRITE_REG(&adapter->hw, IXGBE_STATUS, before);
	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
1338 for (i = 0; i < test->array_len; i++) {
1339 switch (test->test_type) {
1341 REG_PATTERN_TEST(test->reg + (i * 0x40),
1346 REG_SET_AND_CHECK(test->reg + (i * 0x40),
1352 (adapter->hw.hw_addr + test->reg)
1356 REG_PATTERN_TEST(test->reg + (i * 4),
1360 case TABLE64_TEST_LO:
1361 REG_PATTERN_TEST(test->reg + (i * 8),
1365 case TABLE64_TEST_HI:
1366 REG_PATTERN_TEST((test->reg + 4) + (i * 8),
1379 static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
1381 struct ixgbe_hw *hw = &adapter->hw;
1382 if (hw->eeprom.ops.validate_checksum(hw, NULL))
1389 static irqreturn_t ixgbe_test_intr(int irq, void *data)
1391 struct net_device *netdev = (struct net_device *) data;
1392 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1394 adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);
1399 static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
1401 struct net_device *netdev = adapter->netdev;
1402 u32 mask, i = 0, shared_int = true;
1403 u32 irq = adapter->pdev->irq;
1407 /* Hook up test interrupt handler just for this test */
1408 if (adapter->msix_entries) {
1409 /* NOTE: we don't test MSI-X interrupts here, yet */
1411 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1413 if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
1418 } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
1419 netdev->name, netdev)) {
1421 } else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
1422 netdev->name, netdev)) {
1426 e_info(hw, "testing %s interrupt\n", shared_int ?
1427 "shared" : "unshared");
1429 /* Disable all the interrupts */
1430 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1431 IXGBE_WRITE_FLUSH(&adapter->hw);
1432 usleep_range(10000, 20000);
1434 /* Test each interrupt */
1435 for (; i < 10; i++) {
1436 /* Interrupt to test */
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
1447 adapter->test_icr = 0;
1448 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1449 ~mask & 0x00007FFF);
1450 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1451 ~mask & 0x00007FFF);
1452 IXGBE_WRITE_FLUSH(&adapter->hw);
1453 usleep_range(10000, 20000);
1455 if (adapter->test_icr & mask) {
		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
1467 adapter->test_icr = 0;
1468 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
1469 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
1470 IXGBE_WRITE_FLUSH(&adapter->hw);
1471 usleep_range(10000, 20000);
		if (!(adapter->test_icr & mask)) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
1486 adapter->test_icr = 0;
1487 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
1488 ~mask & 0x00007FFF);
1489 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
1490 ~mask & 0x00007FFF);
1491 IXGBE_WRITE_FLUSH(&adapter->hw);
1492 usleep_range(10000, 20000);
1494 if (adapter->test_icr) {
1501 /* Disable all the interrupts */
1502 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
1503 IXGBE_WRITE_FLUSH(&adapter->hw);
1504 usleep_range(10000, 20000);
1506 /* Unhook test interrupt handler */
1507 free_irq(irq, netdev);
1512 static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1514 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1515 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1516 struct ixgbe_hw *hw = &adapter->hw;
1519 /* shut down the DMA engines now so they can be reinitialized later */
1522 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1523 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1524 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1525 ixgbe_disable_rx_queue(adapter, rx_ring);
1528 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1529 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1530 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1532 switch (hw->mac.type) {
1533 case ixgbe_mac_82599EB:
1534 case ixgbe_mac_X540:
1535 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1536 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1537 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1543 ixgbe_reset(adapter);
1545 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1546 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1549 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1551 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1552 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1557 /* Setup Tx descriptor ring and Tx buffers */
1558 tx_ring->count = IXGBE_DEFAULT_TXD;
1559 tx_ring->queue_index = 0;
1560 tx_ring->dev = &adapter->pdev->dev;
1561 tx_ring->netdev = adapter->netdev;
1562 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1564 err = ixgbe_setup_tx_resources(tx_ring);
1568 switch (adapter->hw.mac.type) {
1569 case ixgbe_mac_82599EB:
1570 case ixgbe_mac_X540:
1571 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1572 reg_data |= IXGBE_DMATXCTL_TE;
1573 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1579 ixgbe_configure_tx_ring(adapter, tx_ring);
1581 /* Setup Rx Descriptor ring and Rx buffers */
1582 rx_ring->count = IXGBE_DEFAULT_RXD;
1583 rx_ring->queue_index = 0;
1584 rx_ring->dev = &adapter->pdev->dev;
1585 rx_ring->netdev = adapter->netdev;
1586 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1588 err = ixgbe_setup_rx_resources(rx_ring);
1594 rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
1595 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
1597 ixgbe_configure_rx_ring(adapter, rx_ring);
1599 rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
1600 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
1605 ixgbe_free_desc_rings(adapter);
1609 static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
1611 struct ixgbe_hw *hw = &adapter->hw;
1614 /* X540 needs to set the MACC.FLU bit to force link up */
1615 if (adapter->hw.mac.type == ixgbe_mac_X540) {
1616 reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
1617 reg_data |= IXGBE_MACC_FLU;
1618 IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
1621 /* right now we only support MAC loopback in the driver */
1622 reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
1623 /* Setup MAC loopback */
1624 reg_data |= IXGBE_HLREG0_LPBK;
1625 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);
1627 reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1628 reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
1629 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);
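	/* accept broadcast frames, store bad packets and enable multicast
	 * promiscuous mode so that looped-back test frames are not filtered
	 * out on receive */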
1631 reg_data = IXGBE_READ_REG(hw, IXGBE_AUTOC);
1632 reg_data &= ~IXGBE_AUTOC_LMS_MASK;
1633 reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
1634 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
1635 IXGBE_WRITE_FLUSH(hw);
1636 usleep_range(10000, 20000);
1638 /* Disable Atlas Tx lanes; re-enabled in reset path */
1639 if (hw->mac.type == ixgbe_mac_82598EB) {
1642 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
1643 atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
1644 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);
1646 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
1647 atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
1648 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);
1650 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
1651 atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
1652 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);
1654 hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
1655 atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
1656 hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
1662 static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
1666 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
1667 reg_data &= ~IXGBE_HLREG0_LPBK;
1668 IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
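/* Loopback test frames are filled with 0xFF, then overwritten from the
 * midpoint with a run of 0xAA plus two marker bytes (0xBE and 0xAF) at fixed
 * offsets; ixgbe_check_lbtest_frame() only verifies the leading 0xFF fill and
 * the two markers when the frame comes back.
 */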
1671 static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
1672 unsigned int frame_size)
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
1676 memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
1677 memset(&skb->data[frame_size + 10], 0xBE, 1);
1678 memset(&skb->data[frame_size + 12], 0xAF, 1);
1681 static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
1682 unsigned int frame_size)
	unsigned char *data;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;
1691 if (data[3] != 0xFF ||
1692 data[frame_size + 10] != 0xBE ||
1693 data[frame_size + 12] != 0xAF)
1696 kunmap(rx_buffer->page);
1701 static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1702 struct ixgbe_ring *tx_ring,
1705 union ixgbe_adv_rx_desc *rx_desc;
1706 struct ixgbe_rx_buffer *rx_buffer;
1707 struct ixgbe_tx_buffer *tx_buffer;
1708 u16 rx_ntc, tx_ntc, count = 0;
1710 /* initialize next to clean and descriptor values */
1711 rx_ntc = rx_ring->next_to_clean;
1712 tx_ntc = tx_ring->next_to_clean;
1713 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1715 while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
1716 /* check Rx buffer */
1717 rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
1719 /* sync Rx buffer for CPU read */
1720 dma_sync_single_for_cpu(rx_ring->dev,
1722 ixgbe_rx_bufsz(rx_ring),
1725 /* verify contents of skb */
1726 if (ixgbe_check_lbtest_frame(rx_buffer, size))
1729 /* sync Rx buffer for device write */
1730 dma_sync_single_for_device(rx_ring->dev,
1732 ixgbe_rx_bufsz(rx_ring),
1735 /* unmap buffer on Tx side */
1736 tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
1737 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
1739 /* increment Rx/Tx next to clean counters */
1741 if (rx_ntc == rx_ring->count)
1744 if (tx_ntc == tx_ring->count)
1747 /* fetch next descriptor */
1748 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
1751 netdev_tx_reset_queue(txring_txq(tx_ring));
1753 /* re-map buffers to ring, store next to clean values */
1754 ixgbe_alloc_rx_buffers(rx_ring, count);
1755 rx_ring->next_to_clean = rx_ntc;
1756 tx_ring->next_to_clean = tx_ntc;
1761 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1763 struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
1764 struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
1765 int i, j, lc, good_cnt, ret_val = 0;
1766 unsigned int size = 1024;
1767 netdev_tx_t tx_ret_val;
1768 struct sk_buff *skb;
1770 /* allocate test skb */
1771 skb = alloc_skb(size, GFP_KERNEL);
1775 /* place data into test skb */
1776 ixgbe_create_lbtest_frame(skb, size);
	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
1785 if (rx_ring->count <= tx_ring->count)
1786 lc = ((tx_ring->count / 64) * 2) + 1;
1788 lc = ((rx_ring->count / 64) * 2) + 1;
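	/* each pass below queues 64 frames, so lc passes are enough to wrap
	 * the larger of the two rings roughly twice */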
1790 for (j = 0; j <= lc; j++) {
1791 /* reset count of good packets */
1794 /* place 64 packets on the transmit queue*/
1795 for (i = 0; i < 64; i++) {
1797 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1800 if (tx_ret_val == NETDEV_TX_OK)
1804 if (good_cnt != 64) {
1809 /* allow 200 milliseconds for packets to go from Tx to Rx */
1812 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1813 if (good_cnt != 64) {
1819 /* free the original skb */
1825 static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
1827 *data = ixgbe_setup_desc_rings(adapter);
1830 *data = ixgbe_setup_loopback_test(adapter);
1833 *data = ixgbe_run_loopback_test(adapter);
1834 ixgbe_loopback_cleanup(adapter);
1837 ixgbe_free_desc_rings(adapter);
1842 static void ixgbe_diag_test(struct net_device *netdev,
1843 struct ethtool_test *eth_test, u64 *data)
1845 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1846 struct ixgbe_hw *hw = &adapter->hw;
1847 bool if_running = netif_running(netdev);
1849 set_bit(__IXGBE_TESTING, &adapter->state);
1850 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1851 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
1853 for (i = 0; i < adapter->num_vfs; i++) {
1854 if (adapter->vfinfo[i].clear_to_send) {
1855 netdev_warn(netdev, "%s",
1856 "offline diagnostic is not "
1857 "supported when VFs are "
1863 eth_test->flags |= ETH_TEST_FL_FAILED;
1864 clear_bit(__IXGBE_TESTING,
1872 e_info(hw, "offline testing starting\n");
1875 /* indicate we're in test mode */
1878 /* bringing adapter down disables SFP+ optics */
1879 if (hw->mac.ops.enable_tx_laser)
1880 hw->mac.ops.enable_tx_laser(hw);
1882 /* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
1885 if (ixgbe_link_test(adapter, &data[4]))
1886 eth_test->flags |= ETH_TEST_FL_FAILED;
1888 ixgbe_reset(adapter);
1889 e_info(hw, "register testing starting\n");
1890 if (ixgbe_reg_test(adapter, &data[0]))
1891 eth_test->flags |= ETH_TEST_FL_FAILED;
1893 ixgbe_reset(adapter);
1894 e_info(hw, "eeprom testing starting\n");
1895 if (ixgbe_eeprom_test(adapter, &data[1]))
1896 eth_test->flags |= ETH_TEST_FL_FAILED;
1898 ixgbe_reset(adapter);
1899 e_info(hw, "interrupt testing starting\n");
1900 if (ixgbe_intr_test(adapter, &data[2]))
1901 eth_test->flags |= ETH_TEST_FL_FAILED;
1903 /* If SRIOV or VMDq is enabled then skip MAC
1904 * loopback diagnostic. */
1905 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
1906 IXGBE_FLAG_VMDQ_ENABLED)) {
1907 e_info(hw, "Skip MAC loopback diagnostic in VT "
1913 ixgbe_reset(adapter);
1914 e_info(hw, "loopback testing starting\n");
1915 if (ixgbe_loopback_test(adapter, &data[3]))
1916 eth_test->flags |= ETH_TEST_FL_FAILED;
1919 ixgbe_reset(adapter);
1921 /* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
1926 e_info(hw, "online testing starting\n");
1928 /* if adapter is down, SFP+ optics will be disabled */
1929 if (!if_running && hw->mac.ops.enable_tx_laser)
1930 hw->mac.ops.enable_tx_laser(hw);
1933 if (ixgbe_link_test(adapter, &data[4]))
1934 eth_test->flags |= ETH_TEST_FL_FAILED;
		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
1945 /* if adapter was down, ensure SFP+ optics are disabled again */
1946 if (!if_running && hw->mac.ops.disable_tx_laser)
1947 hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
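/*
 * Example (illustrative; "eth0" is just a placeholder interface name): the
 * offline self-test above is driven from userspace with the ethtool utility,
 * e.g. "ethtool -t eth0 offline".  The five u64 results map to data[0]
 * register test, data[1] EEPROM test, data[2] interrupt test, data[3]
 * loopback test and data[4] link test; a non-zero value marks a failure and
 * ETH_TEST_FL_FAILED is set in the reply flags.
 */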
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
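/*
 * Example (illustrative): "ethtool -s eth0 wol g" requests WAKE_MAGIC, which
 * the handler above translates to IXGBE_WUFC_MAG before arming wakeup on the
 * PCI device, and "ethtool eth0" then reports "Wake-on: g".  Options the
 * hardware cannot honour (WAKE_PHY, WAKE_ARP, WAKE_MAGICSECURE) are rejected
 * with -EOPNOTSUPP.
 */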
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
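/*
 * Example (illustrative): "ethtool -p eth0 5" blinks the port LED for five
 * seconds.  ETHTOOL_ID_ACTIVE saves LEDCTL and returns 2, asking the ethtool
 * core to drive ETHTOOL_ID_ON/ETHTOOL_ID_OFF at two on/off cycles per second;
 * ETHTOOL_ID_INACTIVE restores the saved register value when the blink ends.
 */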
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough "
			       "to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

#if IS_ENABLED(CONFIG_BQL)
	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting > 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev > IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev > 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}
#endif
	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
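/*
 * Worked example (illustrative): "ethtool -C eth0 rx-usecs 10" arrives here
 * as ec->rx_coalesce_usecs = 10 and is stored left-shifted by two
 * (rx_itr_setting = 40), the scaled form the EITR interval field expects.
 * A value of 1 keeps dynamic ITR, and 0 disables interrupt moderation
 * entirely.  ixgbe_write_eitr() then programs each queue vector with the
 * new interval.
 */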
2194 static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
2197 union ixgbe_atr_input *mask = &adapter->fdir_mask;
2198 struct ethtool_rx_flow_spec *fsp =
2199 (struct ethtool_rx_flow_spec *)&cmd->fs;
2200 struct hlist_node *node, *node2;
2201 struct ixgbe_fdir_filter *rule = NULL;
2203 /* report total rule count */
2204 cmd->data = (1024 << adapter->fdir_pballoc) - 2;
2206 hlist_for_each_entry_safe(rule, node, node2,
2207 &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}
	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;
2215 /* fill out the flow spec entry */
2217 /* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}
2238 fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
2239 fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
2240 fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
2241 fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
2242 fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
2243 fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
2244 fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
2245 fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
2246 fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
2247 fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
2248 fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
2249 fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
2250 fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
2251 fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
2252 fsp->flow_type |= FLOW_EXT;
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
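/*
 * Example (illustrative): "ethtool -u eth0 rule 5" lands here with
 * fsp->location = 5; the stored ixgbe_fdir_filter is translated back into an
 * ethtool_rx_flow_spec, with the single per-port mask reported through the
 * m_u/m_ext fields and RX_CLS_FLOW_DISC used for drop rules.
 */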
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2, *parent;
	struct ixgbe_fdir_filter *rule;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);
	input->sw_idx = sw_idx;

	/* add filter to the list */
	if (parent)
		hlist_add_after(parent, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
2453 static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
2456 struct ethtool_rx_flow_spec *fsp =
2457 (struct ethtool_rx_flow_spec *)&cmd->fs;
2458 struct ixgbe_hw *hw = &adapter->hw;
2459 struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;
	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;
2474 /* Don't allow indexes to exist outside of available space */
2475 if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;
2484 memset(&mask, 0, sizeof(union ixgbe_atr_input));
2487 input->sw_idx = fsp->location;
2489 /* record flow type */
2490 if (!ixgbe_flowspec_to_flow_type(fsp,
2491 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}
2496 mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
2497 IXGBE_ATR_L4TYPE_MASK;
2499 if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
2500 mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
2502 /* Copy input into formatted structures */
2503 input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
2504 mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
2505 input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
2506 mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
2507 input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
2508 mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
2509 input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
2510 mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
2512 if (fsp->flow_type & FLOW_EXT) {
2513 input->filter.formatted.vm_pool =
2514 (unsigned char)ntohl(fsp->h_ext.data[1]);
2515 mask.formatted.vm_pool =
2516 (unsigned char)ntohl(fsp->m_ext.data[1]);
2517 input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
2518 mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
2519 input->filter.formatted.flex_bytes =
2520 fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}
2524 /* determine if we need to drop or route the packet */
2525 if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;
2530 spin_lock(&adapter->fdir_perfect_lock);
2532 if (hlist_empty(&adapter->fdir_filter_list)) {
2533 /* save mask and program input mask into HW */
2534 memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
2540 } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
2541 e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}
2545 /* apply mask and compute/store hash */
2546 ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
2548 /* program filters to filter memory */
2549 err = ixgbe_fdir_write_perfect_filter_82599(hw,
2550 &input->filter, input->sw_idx,
2551 (input->action == IXGBE_FDIR_DROP_QUEUE) ?
2552 IXGBE_FDIR_DROP_QUEUE :
			adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;
2557 ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
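/*
 * Example (illustrative): Flow Director perfect filters are managed from
 * userspace with ethtool, e.g.
 *   ethtool -U eth0 flow-type tcp4 dst-ip 192.168.0.10 dst-port 80 \
 *           action 4 loc 1
 * to steer matching packets to Rx queue 4, and
 *   ethtool -U eth0 delete 1
 * to remove the rule again; "action -1" installs a drop rule
 * (RX_CLS_FLOW_DISC).
 */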
2583 #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
2584 IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2585 static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
2588 u32 flags2 = adapter->flags2;
	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;
	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}
2655 /* if we changed something we need to update flags */
2656 if (flags2 != adapter->flags2) {
2657 struct ixgbe_hw *hw = &adapter->hw;
2658 u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
2660 if ((flags2 & UDP_RSS_FLAGS) &&
2661 !(adapter->flags2 & UDP_RSS_FLAGS))
2662 e_warn(drv, "enabling UDP RSS: fragmented packets"
2663 " may arrive out of order to the stack above\n");
2665 adapter->flags2 = flags2;
2667 /* Perform hash on these packet types */
2668 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
2669 | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
2670 | IXGBE_MRQC_RSS_FIELD_IPV6
2671 | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
2673 mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
2674 IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
2676 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
2677 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
2679 if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
2680 mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
		IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
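/*
 * Example (illustrative): "ethtool -N eth0 rx-flow-hash udp4 sdfn" requests
 * hashing UDP/IPv4 on src/dst IP and both port halves; the handler above
 * sets IXGBE_FLAG2_RSS_FIELD_IPV4_UDP and turns on the matching MRQC field,
 * while "ethtool -N eth0 rx-flow-hash udp4 sd" falls back to IP-only hashing.
 */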
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
2710 static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
2713 struct ixgbe_adapter *adapter = netdev_priv(dev);
2715 switch (adapter->hw.mac.type) {
2716 case ixgbe_mac_X540:
2717 case ixgbe_mac_82599EB:
2718 info->so_timestamping =
2719 SOF_TIMESTAMPING_TX_SOFTWARE |
2720 SOF_TIMESTAMPING_RX_SOFTWARE |
2721 SOF_TIMESTAMPING_SOFTWARE |
2722 SOF_TIMESTAMPING_TX_HARDWARE |
2723 SOF_TIMESTAMPING_RX_HARDWARE |
2724 SOF_TIMESTAMPING_RAW_HARDWARE;
2726 if (adapter->ptp_clock)
2727 info->phc_index = ptp_clock_index(adapter->ptp_clock);
		else
			info->phc_index = -1;

		info->tx_types =
			(1 << HWTSTAMP_TX_OFF) |
2733 (1 << HWTSTAMP_TX_ON);
		info->rx_filters =
			(1 << HWTSTAMP_FILTER_NONE) |
2737 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
2738 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
2739 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
2740 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
2741 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
2742 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
2743 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
2744 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
2745 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
2746 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
			(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	return 0;
}
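/*
 * Example (illustrative): PTP daemons query this information via
 * ETHTOOL_GET_TS_INFO (e.g. "ethtool -T eth0"); on 82599/X540 the reported
 * phc_index ties the netdev to its PTP clock device so ptp4l can pick the
 * right hardware clock, while other MACs fall back to the software
 * timestamping capabilities reported by ethtool_op_get_ts_info().
 */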
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
2758 unsigned int max_combined;
2759 u8 tcs = netdev_get_num_tc(adapter->netdev);
2761 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* SR-IOV currently only allows one queue on the PF */
		max_combined = 1;
	} else if (tcs > 1) {
2768 /* For DCB report channels per traffic class */
2769 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
2780 /* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = IXGBE_MAX_RSS_INDICES;
	}
	return max_combined;
}
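/*
 * Worked example (illustrative): with 8 traffic classes configured on an
 * 82599, tcs > 4 so max_combined is 8 queues per TC; with 4 TCs it is 16
 * per TC, and the 82598 is always limited to 4 per TC.  Without DCB the
 * limit is IXGBE_MAX_FDIR_INDICES when ATR is enabled, otherwise
 * IXGBE_MAX_RSS_INDICES.
 */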
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (netdev_get_num_tc(dev) > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit at 16 */
	if (count > IXGBE_MAX_RSS_INDICES)
		count = IXGBE_MAX_RSS_INDICES;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
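/*
 * Example (illustrative): "ethtool -L eth0 combined 16" requests 16 queue
 * pairs; the request is rejected unless the rx/tx counts are zero,
 * other_count still equals NON_Q_VECTORS and the total stays within
 * ixgbe_max_channels(), after which ixgbe_setup_tc() re-provisions the rings.
 */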
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status;
	u8 sff8472_rev, addr_mode;
	int ret_val = 0;
	bool page_swap = false;

	/* avoid concurrent i2c reads */
	while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		msleep(100);

	/* used by the service task */
	set_bit(__IXGBE_READ_I2C, &adapter->state);

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status != 0) {
		ret_val = -EIO;
		goto err_out;
	}

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status != 0) {
		ret_val = -EIO;
		goto err_out;
	}

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

err_out:
	/* clear the flag for the service task */
	clear_bit(__IXGBE_READ_I2C, &adapter->state);

	return ret_val;
}
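/*
 * Example (illustrative): "ethtool -m eth0" first calls this handler to learn
 * whether the SFP+ module answers SFF-8472 (512 byte EEPROM at addresses
 * 0xA0/0xA2) or only SFF-8079 (256 bytes at 0xA0), then reads the reported
 * length through ixgbe_get_module_eeprom() below.
 */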
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;
	int ret_val = 0;

	/* ixgbe_get_module_info is called before this function in all
	 * cases, so we do not need to repeat the checks done there,
	 * and can trust ee->len to be a known value.
	 */

	while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
		msleep(100);
	set_bit(__IXGBE_READ_I2C, &adapter->state);

	/* Read the first block, SFF-8079 */
	for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
		status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		if (status != 0) {
			/* Error occurred while reading module */
			ret_val = -EIO;
			goto err_out;
		}
		data[i] = databyte;
	}

	/* If the second block is requested, check if SFF-8472 is supported. */
	if (ee->len == ETH_MODULE_SFF_8472_LEN) {
		if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
			return -EOPNOTSUPP;

		/* Read the second block, SFF-8472 */
		for (i = ETH_MODULE_SFF_8079_LEN;
		     i < ETH_MODULE_SFF_8472_LEN; i++) {
			status = hw->phy.ops.read_i2c_sff8472(hw,
				i - ETH_MODULE_SFF_8079_LEN, &databyte);
			if (status != 0) {
				/* Error occurred while reading module */
				ret_val = -EIO;
				goto err_out;
			}
			data[i] = databyte;
		}
	}

err_out:
	clear_bit(__IXGBE_READ_I2C, &adapter->state);

	return ret_val;
}
2975 static const struct ethtool_ops ixgbe_ethtool_ops = {
2976 .get_settings = ixgbe_get_settings,
2977 .set_settings = ixgbe_set_settings,
2978 .get_drvinfo = ixgbe_get_drvinfo,
2979 .get_regs_len = ixgbe_get_regs_len,
2980 .get_regs = ixgbe_get_regs,
2981 .get_wol = ixgbe_get_wol,
2982 .set_wol = ixgbe_set_wol,
2983 .nway_reset = ixgbe_nway_reset,
2984 .get_link = ethtool_op_get_link,
2985 .get_eeprom_len = ixgbe_get_eeprom_len,
2986 .get_eeprom = ixgbe_get_eeprom,
2987 .set_eeprom = ixgbe_set_eeprom,
2988 .get_ringparam = ixgbe_get_ringparam,
2989 .set_ringparam = ixgbe_set_ringparam,
2990 .get_pauseparam = ixgbe_get_pauseparam,
2991 .set_pauseparam = ixgbe_set_pauseparam,
2992 .get_msglevel = ixgbe_get_msglevel,
2993 .set_msglevel = ixgbe_set_msglevel,
2994 .self_test = ixgbe_diag_test,
2995 .get_strings = ixgbe_get_strings,
2996 .set_phys_id = ixgbe_set_phys_id,
2997 .get_sset_count = ixgbe_get_sset_count,
2998 .get_ethtool_stats = ixgbe_get_ethtool_stats,
2999 .get_coalesce = ixgbe_get_coalesce,
3000 .set_coalesce = ixgbe_set_coalesce,
3001 .get_rxnfc = ixgbe_get_rxnfc,
3002 .set_rxnfc = ixgbe_set_rxnfc,
3003 .get_channels = ixgbe_get_channels,
3004 .set_channels = ixgbe_set_channels,
3005 .get_ts_info = ixgbe_get_ts_info,
3006 .get_module_info = ixgbe_get_module_info,
	.get_module_eeprom = ixgbe_get_module_eeprom,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}