1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
11 #include <linux/bitops.h>
12 #include <linux/delay.h>
13 #include <linux/pci.h>
14 #include <linux/module.h>
15 #include <linux/seq_file.h>
16 #include <linux/i2c.h>
17 #include <linux/mii.h>
18 #include <linux/slab.h>
19 #include "net_driver.h"
24 #include "farch_regs.h"
27 #include "workarounds.h"
31 /* Hardware control for SFC4000 (aka Falcon). */
33 /**************************************************************************
35 * MAC stats DMA format
37 **************************************************************************
/* Total size in bytes of the MAC statistics block DMAed by the MAC. */
40 #define FALCON_MAC_STATS_SIZE 0x100
/* Byte offset and bit width of each hardware counter within the DMAed
 * statistics block.  Offsets are relative to the start of the block;
 * widths are 16, 32 or 48 bits (see the FALCON_STAT() accumulator). */
42 #define XgRxOctets_offset 0x0
43 #define XgRxOctets_WIDTH 48
44 #define XgRxOctetsOK_offset 0x8
45 #define XgRxOctetsOK_WIDTH 48
46 #define XgRxPkts_offset 0x10
47 #define XgRxPkts_WIDTH 32
48 #define XgRxPktsOK_offset 0x14
49 #define XgRxPktsOK_WIDTH 32
50 #define XgRxBroadcastPkts_offset 0x18
51 #define XgRxBroadcastPkts_WIDTH 32
52 #define XgRxMulticastPkts_offset 0x1C
53 #define XgRxMulticastPkts_WIDTH 32
54 #define XgRxUnicastPkts_offset 0x20
55 #define XgRxUnicastPkts_WIDTH 32
56 #define XgRxUndersizePkts_offset 0x24
57 #define XgRxUndersizePkts_WIDTH 32
58 #define XgRxOversizePkts_offset 0x28
59 #define XgRxOversizePkts_WIDTH 32
60 #define XgRxJabberPkts_offset 0x2C
61 #define XgRxJabberPkts_WIDTH 32
62 #define XgRxUndersizeFCSerrorPkts_offset 0x30
63 #define XgRxUndersizeFCSerrorPkts_WIDTH 32
64 #define XgRxDropEvents_offset 0x34
65 #define XgRxDropEvents_WIDTH 32
66 #define XgRxFCSerrorPkts_offset 0x38
67 #define XgRxFCSerrorPkts_WIDTH 32
68 #define XgRxAlignError_offset 0x3C
69 #define XgRxAlignError_WIDTH 32
70 #define XgRxSymbolError_offset 0x40
71 #define XgRxSymbolError_WIDTH 32
72 #define XgRxInternalMACError_offset 0x44
73 #define XgRxInternalMACError_WIDTH 32
74 #define XgRxControlPkts_offset 0x48
75 #define XgRxControlPkts_WIDTH 32
76 #define XgRxPausePkts_offset 0x4C
77 #define XgRxPausePkts_WIDTH 32
78 #define XgRxPkts64Octets_offset 0x50
79 #define XgRxPkts64Octets_WIDTH 32
80 #define XgRxPkts65to127Octets_offset 0x54
81 #define XgRxPkts65to127Octets_WIDTH 32
82 #define XgRxPkts128to255Octets_offset 0x58
83 #define XgRxPkts128to255Octets_WIDTH 32
84 #define XgRxPkts256to511Octets_offset 0x5C
85 #define XgRxPkts256to511Octets_WIDTH 32
86 #define XgRxPkts512to1023Octets_offset 0x60
87 #define XgRxPkts512to1023Octets_WIDTH 32
88 #define XgRxPkts1024to15xxOctets_offset 0x64
89 #define XgRxPkts1024to15xxOctets_WIDTH 32
90 #define XgRxPkts15xxtoMaxOctets_offset 0x68
91 #define XgRxPkts15xxtoMaxOctets_WIDTH 32
92 #define XgRxLengthError_offset 0x6C
93 #define XgRxLengthError_WIDTH 32
94 #define XgTxPkts_offset 0x80
95 #define XgTxPkts_WIDTH 32
96 #define XgTxOctets_offset 0x88
97 #define XgTxOctets_WIDTH 48
98 #define XgTxMulticastPkts_offset 0x90
99 #define XgTxMulticastPkts_WIDTH 32
100 #define XgTxBroadcastPkts_offset 0x94
101 #define XgTxBroadcastPkts_WIDTH 32
102 #define XgTxUnicastPkts_offset 0x98
103 #define XgTxUnicastPkts_WIDTH 32
104 #define XgTxControlPkts_offset 0x9C
105 #define XgTxControlPkts_WIDTH 32
106 #define XgTxPausePkts_offset 0xA0
107 #define XgTxPausePkts_WIDTH 32
108 #define XgTxPkts64Octets_offset 0xA4
109 #define XgTxPkts64Octets_WIDTH 32
110 #define XgTxPkts65to127Octets_offset 0xA8
111 #define XgTxPkts65to127Octets_WIDTH 32
112 #define XgTxPkts128to255Octets_offset 0xAC
113 #define XgTxPkts128to255Octets_WIDTH 32
114 #define XgTxPkts256to511Octets_offset 0xB0
115 #define XgTxPkts256to511Octets_WIDTH 32
116 #define XgTxPkts512to1023Octets_offset 0xB4
117 #define XgTxPkts512to1023Octets_WIDTH 32
118 #define XgTxPkts1024to15xxOctets_offset 0xB8
119 #define XgTxPkts1024to15xxOctets_WIDTH 32
120 #define XgTxPkts1519toMaxOctets_offset 0xBC
121 #define XgTxPkts1519toMaxOctets_WIDTH 32
122 #define XgTxUndersizePkts_offset 0xC0
123 #define XgTxUndersizePkts_WIDTH 32
124 #define XgTxOversizePkts_offset 0xC4
125 #define XgTxOversizePkts_WIDTH 32
126 #define XgTxNonTcpUdpPkt_offset 0xC8
127 #define XgTxNonTcpUdpPkt_WIDTH 16
128 #define XgTxMacSrcErrPkt_offset 0xCC
129 #define XgTxMacSrcErrPkt_WIDTH 16
130 #define XgTxIpSrcErrPkt_offset 0xD0
131 #define XgTxIpSrcErrPkt_WIDTH 16
/* XgDmaDone is a generation marker, not a counter: the driver writes
 * FALCON_STATS_NOT_DONE here before requesting a stats DMA and treats
 * FALCON_STATS_DONE as completion (see falcon_stats_request/complete). */
132 #define XgDmaDone_offset 0xD4
133 #define XgDmaDone_WIDTH 32
135 #define FALCON_STATS_NOT_DONE 0x00000000
136 #define FALCON_STATS_DONE 0xffffffff
/* Token-pasting helpers: expand to the <stat>_offset / <stat>_WIDTH
 * constants defined above for a given statistic name. */
138 #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
139 #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
141 /* Retrieve statistic from statistics block */
/* Accumulate the named hardware counter from the stats DMA buffer into
 * (efx)->mac_stats.<efx_stat>.  The load width is chosen from
 * FALCON_STAT_WIDTH: 16- and 32-bit counters use le16/le32 loads; the
 * final branch accumulates wider (48-bit) counters via a 64-bit load.
 * NOTE(review): the "else" line and the closing "} while (0)" of this
 * macro are not visible in this chunk of the file. */
142 #define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
143 if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
144 (efx)->mac_stats.efx_stat += le16_to_cpu( \
145 *((__force __le16 *) \
146 (efx->stats_buffer.addr + \
147 FALCON_STAT_OFFSET(falcon_stat)))); \
148 else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
149 (efx)->mac_stats.efx_stat += le32_to_cpu( \
150 *((__force __le32 *) \
151 (efx->stats_buffer.addr + \
152 FALCON_STAT_OFFSET(falcon_stat)))); \
154 (efx)->mac_stats.efx_stat += le64_to_cpu( \
155 *((__force __le64 *) \
156 (efx->stats_buffer.addr + \
157 FALCON_STAT_OFFSET(falcon_stat)))); \
160 /**************************************************************************
162 * Non-volatile configuration
164 **************************************************************************
167 /* Board configuration v2 (v1 is obsolete; later versions are compatible) */
168 struct falcon_nvconfig_board_v2 {
	/* Little-endian fields as laid out in non-volatile configuration. */
174 __le16 asic_sub_revision;
175 __le16 board_revision;
178 /* Board configuration v3 extra information */
179 struct falcon_nvconfig_board_v3 {
	/* Encoded SPI device descriptors; bit layout is given by the
	 * SPI_DEV_TYPE_* constants below.  Presumably one entry per SPI
	 * device (flash / EEPROM) -- TODO confirm against callers. */
180 __le32 spi_device_type[2];
183 /* Bit numbers for spi_device_type */
/* Field layout of a spi_device_type word.  SIZE, ERASE_SIZE and
 * BLOCK_SIZE are log2 encodings (e.g. 13 -> 8 KB, see the constants
 * large_eeprom_type / default_flash_type below). */
184 #define SPI_DEV_TYPE_SIZE_LBN 0
185 #define SPI_DEV_TYPE_SIZE_WIDTH 5
186 #define SPI_DEV_TYPE_ADDR_LEN_LBN 6
187 #define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
188 #define SPI_DEV_TYPE_ERASE_CMD_LBN 8
189 #define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
190 #define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
191 #define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
192 #define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
193 #define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
/* Extract one of the above fields from a spi_device_type word. */
194 #define SPI_DEV_TYPE_FIELD(type, field) \
195 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
/* Byte offset of the nvconfig structure within the flash/EEPROM. */
197 #define FALCON_NVCONFIG_OFFSET 0x300
/* Expected value of board_magic_num for a valid board section. */
199 #define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
/* Non-volatile configuration image.  The trailing comments give each
 * field's absolute flash offset, starting at FALCON_NVCONFIG_OFFSET. */
200 struct falcon_nvconfig {
201 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
202 u8 mac_address[2][8]; /* 0x310 */
203 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
204 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
205 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
206 efx_oword_t hw_init_reg; /* 0x350 */
207 efx_oword_t nic_stat_reg; /* 0x360 */
208 efx_oword_t glb_ctl_reg; /* 0x370 */
209 efx_oword_t srm_cfg_reg; /* 0x380 */
210 efx_oword_t spare_reg; /* 0x390 */
	/* Board section: magic number, structure version and checksum,
	 * followed by the v2 board info and (at 0x3C0) the v3 extras. */
211 __le16 board_magic_num; /* 0x3A0 */
212 __le16 board_struct_ver;
213 __le16 board_checksum;
214 struct falcon_nvconfig_board_v2 board_v2;
215 efx_oword_t ee_base_page_reg; /* 0x3B0 */
216 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
219 /*************************************************************************/
/* Forward declarations for routines used before their definitions. */
221 static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
222 static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
/* Pre-encoded spi_device_type words (see SPI_DEV_TYPE_* field layout). */
224 static const unsigned int
225 /* "Large" EEPROM device: Atmel AT25640 or similar
226 * 8 KB, 16-bit address, 32 B write block */
227 large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN)
228 | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN)
229 | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)),
230 /* Default flash device: Atmel AT25F1024
231 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */
232 default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN)
233 | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN)
234 | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN)
235 | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN)
236 | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN));
238 /**************************************************************************
240 * I2C bus - this is a bit-bashing interface using GPIO pins
241 * Note that it uses the output enables to tristate the outputs
242 * SDA is the data pin and SCL is the clock
244 **************************************************************************
246 static void falcon_setsda(void *data, int state)
248 struct efx_nic *efx = (struct efx_nic *)data;
251 efx_reado(efx, ®, FR_AB_GPIO_CTL);
252 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
253 efx_writeo(efx, ®, FR_AB_GPIO_CTL);
256 static void falcon_setscl(void *data, int state)
258 struct efx_nic *efx = (struct efx_nic *)data;
261 efx_reado(efx, ®, FR_AB_GPIO_CTL);
262 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
263 efx_writeo(efx, ®, FR_AB_GPIO_CTL);
266 static int falcon_getsda(void *data)
268 struct efx_nic *efx = (struct efx_nic *)data;
271 efx_reado(efx, ®, FR_AB_GPIO_CTL);
272 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
275 static int falcon_getscl(void *data)
277 struct efx_nic *efx = (struct efx_nic *)data;
280 efx_reado(efx, ®, FR_AB_GPIO_CTL);
281 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
/* Bit-banging I2C adapter operations wired to the GPIO helpers above. */
284 static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
285 .setsda = falcon_setsda,
286 .setscl = falcon_setscl,
287 .getsda = falcon_getsda,
288 .getscl = falcon_getscl,
290 /* Wait up to 50 ms for slave to let us pull SCL high */
291 .timeout = DIV_ROUND_UP(HZ, 20),
/* Write a channel's interrupt moderation setting to its timer-command
 * register: interrupt hold-off mode with (irq_moderation - 1) ticks
 * when moderation is enabled, timer disabled otherwise. */
294 static void falcon_push_irq_moderation(struct efx_channel *channel)
296 efx_dword_t timer_cmd;
297 struct efx_nic *efx = channel->efx;
299 /* Set timer register */
300 if (channel->irq_moderation) {
301 EFX_POPULATE_DWORD_2(timer_cmd,
302 FRF_AB_TC_TIMER_MODE,
303 FFE_BB_TIMER_MODE_INT_HLDOFF,
305 channel->irq_moderation - 1);
307 EFX_POPULATE_DWORD_2(timer_cmd,
308 FRF_AB_TC_TIMER_MODE,
309 FFE_BB_TIMER_MODE_DIS,
310 FRF_AB_TC_TIMER_VAL, 0);
	/* The A-rev and B-rev timer-command register addresses must match
	 * for the single write below to serve both revisions. */
312 BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
313 efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
317 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
/* Quiesce the datapath before flushing DMA queues: isolate the MAC
 * wrapper, then allow the FIFOs time to drain (rationale below). */
319 static void falcon_prepare_flush(struct efx_nic *efx)
321 falcon_deconfigure_mac_wrapper(efx);
323 /* Wait for the tx and rx fifo's to get to the next packet boundary
324 * (~1ms without back-pressure), then to drain the remainder of the
325 * fifo's at data path speeds (negligible), with a healthy margin. */
329 /* Acknowledge a legacy interrupt from Falcon
331 * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
333 * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
334 * BIU. Interrupt acknowledge is read sensitive so must write instead
335 * (then read to ensure the BIU collector is flushed)
337 * NB most hardware supports MSI interrupts
339 static inline void falcon_irq_ack_a1(struct efx_nic *efx)
343 EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
344 efx_writed(efx, ®, FR_AA_INT_ACK_KER);
345 efx_readd(efx, ®, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
/* Legacy (INTx) interrupt handler for Falcon rev A1.  Reads the DMAed
 * interrupt vector from irq_status, bails out early if the interrupt
 * is not ours or soft interrupts are disabled, escalates fatal errors,
 * then clears the vector, acks the chip and schedules the signalled
 * channels. */
349 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
351 struct efx_nic *efx = dev_id;
352 efx_oword_t *int_ker = efx->irq_status.addr;
356 /* Check to see if this is our interrupt. If it isn't, we
357 * exit without having touched the hardware.
359 if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
360 netif_vdbg(efx, intr, efx->net_dev,
361 "IRQ %d on CPU %d not for me\n", irq,
362 raw_smp_processor_id());
365 efx->last_irq_cpu = raw_smp_processor_id();
366 netif_vdbg(efx, intr, efx->net_dev,
367 "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
368 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
370 if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
373 /* Check to see if we have a serious error condition */
374 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
375 if (unlikely(syserr))
376 return efx_farch_fatal_interrupt(efx);
378 /* Determine interrupting queues, clear interrupt status
379 * register and acknowledge the device interrupt.
381 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
382 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
383 EFX_ZERO_OWORD(*int_ker);
384 wmb(); /* Ensure the vector is cleared before interrupt ack */
385 falcon_irq_ack_a1(efx);
388 efx_schedule_channel_irq(efx_get_channel(efx, 0));
390 efx_schedule_channel_irq(efx_get_channel(efx, 1));
393 /**************************************************************************
397 **************************************************************************
400 #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
402 static int falcon_spi_poll(struct efx_nic *efx)
405 efx_reado(efx, ®, FR_AB_EE_SPI_HCMD);
406 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
409 /* Wait for SPI command completion */
/* Polls falcon_spi_poll() busily for a few iterations, then sleeps
 * between polls until completion or the ~100 ms deadline (rationale
 * in the comment below). */
410 static int falcon_spi_wait(struct efx_nic *efx)
412 /* Most commands will finish quickly, so we start polling at
413 * very short intervals. Sometimes the command may have to
414 * wait for VPD or expansion ROM access outside of our
415 * control, so we allow up to 100 ms. */
416 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10);
419 for (i = 0; i < 10; i++) {
420 if (!falcon_spi_poll(efx))
426 if (!falcon_spi_poll(efx))
428 if (time_after_eq(jiffies, timeout)) {
429 netif_err(efx, hw, efx->net_dev,
430 "timed out waiting for SPI\n");
433 schedule_timeout_uninterruptible(1);
/* Issue a single SPI command of at most FALCON_SPI_MAX_LEN data bytes.
 * @address: device address, or negative for commands with no address
 *	phase
 * @in: data to write, or NULL
 * @out: buffer for data read back, or NULL
 * Returns 0 on success or a negative error code (e.g. -EBUSY if a
 * previous command is still running). */
437 int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
438 unsigned int command, int address,
439 const void *in, void *out, size_t len)
441 bool addressed = (address >= 0);
442 bool reading = (out != NULL);
446 /* Input validation */
447 if (len > FALCON_SPI_MAX_LEN)
450 /* Check that previous command is not still running */
451 rc = falcon_spi_poll(efx);
455 /* Program address register, if we have an address */
457 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
458 efx_writeo(efx, ®, FR_AB_EE_SPI_HADR);
461 /* Program data register, if we have data */
463 memcpy(®, in, len);
464 efx_writeo(efx, ®, FR_AB_EE_SPI_HDATA);
467 /* Issue read/write command */
468 EFX_POPULATE_OWORD_7(reg,
469 FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
470 FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
471 FRF_AB_EE_SPI_HCMD_DABCNT, len,
472 FRF_AB_EE_SPI_HCMD_READ, reading,
473 FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
474 FRF_AB_EE_SPI_HCMD_ADBCNT,
475 (addressed ? spi->addr_len : 0),
476 FRF_AB_EE_SPI_HCMD_ENC, command);
477 efx_writeo(efx, ®, FR_AB_EE_SPI_HCMD);
479 /* Wait for read/write to complete */
480 rc = falcon_spi_wait(efx);
	/* If reading, copy the result out of the data register */
486 efx_reado(efx, ®, FR_AB_EE_SPI_HDATA);
487 memcpy(out, ®, len);
/* Number of bytes that may be written starting at @start without
 * crossing a device write-block boundary, capped at FALCON_SPI_MAX_LEN.
 * block_size is assumed to be a power of two (mask arithmetic below). */
494 falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
496 return min(FALCON_SPI_MAX_LEN,
497 (spi->block_size - (start & (spi->block_size - 1))));
/* Fold high address bits (A8+) into the command byte, shifted to bit 3,
 * for small devices that encode part of the address in the opcode;
 * spi->munge_address masks which bits (if any) are folded in. */
501 efx_spi_munge_command(const struct efx_spi_device *spi,
502 const u8 command, const unsigned int address)
504 return command | (((address >> 8) & spi->munge_address) << 3);
507 /* Wait up to 10 ms for buffered write completion */
/* Repeatedly issues SPI_RDSR and sleeps between polls until the
 * device's NRDY status bit clears or the deadline passes. */
509 falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
511 unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
516 rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
517 &status, sizeof(status));
520 if (!(status & SPI_STATUS_NRDY))
522 if (time_after_eq(jiffies, timeout)) {
523 netif_err(efx, hw, efx->net_dev,
524 "SPI write timeout on device %d"
525 " last status=0x%02x\n",
526 spi->device_id, status);
529 schedule_timeout_uninterruptible(1);
/* Read @len bytes starting at @start from the SPI device into @buffer,
 * issuing SPI_READ commands in chunks of at most FALCON_SPI_MAX_LEN
 * and bailing out early if a signal is pending. */
533 int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
534 loff_t start, size_t len, size_t *retlen, u8 *buffer)
536 size_t block_len, pos = 0;
537 unsigned int command;
541 block_len = min(len - pos, FALCON_SPI_MAX_LEN);
543 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
544 rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
545 buffer + pos, block_len);
550 /* Avoid locking up the system */
552 if (signal_pending(current)) {
/* Write @len bytes from @buffer to the SPI device starting at @start.
 * Each block is preceded by a write-enable (SPI_WREN), limited so it
 * does not cross a device write-block boundary, waited on for
 * completion, then read back and verified against the source data.
 * Bails out early if a signal is pending. */
564 falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
565 loff_t start, size_t len, size_t *retlen, const u8 *buffer)
567 u8 verify_buffer[FALCON_SPI_MAX_LEN];
568 size_t block_len, pos = 0;
569 unsigned int command;
573 rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
577 block_len = min(len - pos,
578 falcon_spi_write_limit(spi, start + pos));
579 command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
580 rc = falcon_spi_cmd(efx, spi, command, start + pos,
581 buffer + pos, NULL, block_len);
585 rc = falcon_spi_wait_write(efx, spi);
	/* Read back and verify the block just written */
589 command = efx_spi_munge_command(spi, SPI_READ, start + pos);
590 rc = falcon_spi_cmd(efx, spi, command, start + pos,
591 NULL, verify_buffer, block_len);
592 if (memcmp(verify_buffer, buffer + pos, block_len)) {
599 /* Avoid locking up the system */
601 if (signal_pending(current)) {
612 /**************************************************************************
616 **************************************************************************
619 /* Configure the XAUI driver that is an output from Falcon */
/* Sets all four XAUI lanes' serdes drive levels and TX driver
 * deemphasis/amplitude to their defaults.  Skipped entirely when
 * there is no PHY (see comment below). */
620 static void falcon_setup_xaui(struct efx_nic *efx)
622 efx_oword_t sdctl, txdrv;
624 /* Move the XAUI into low power, unless there is no PHY, in
625 * which case the XAUI will have to drive a cable. */
626 if (efx->phy_type == PHY_TYPE_NONE)
	/* Per-lane (A-D) high/low drive levels */
629 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
630 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
631 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
632 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
633 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
634 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
635 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
636 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
637 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
638 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
	/* Per-lane TX driver deemphasis (DEQ*) and amplitude (DTX*) */
640 EFX_POPULATE_OWORD_8(txdrv,
641 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
642 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
643 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
644 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
645 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
646 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
647 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
648 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
649 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
/* Reset the XAUI/XGXS block: trigger the reset, poll up to 10 ms for
 * completion and re-run falcon_setup_xaui() on success; logs an error
 * on timeout.  Caller must have stats DMA disabled (WARN below). */
652 int falcon_reset_xaui(struct efx_nic *efx)
654 struct falcon_nic_data *nic_data = efx->nic_data;
658 /* Don't fetch MAC statistics over an XMAC reset */
659 WARN_ON(nic_data->stats_disable_count == 0);
661 /* Start reset sequence */
662 EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
663 efx_writeo(efx, ®, FR_AB_XX_PWR_RST);
665 /* Wait up to 10 ms for completion, then reinitialise */
666 for (count = 0; count < 1000; count++) {
667 efx_reado(efx, ®, FR_AB_XX_PWR_RST);
668 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
669 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
670 falcon_setup_xaui(efx);
675 netif_err(efx, hw, efx->net_dev,
676 "timed out waiting for XAUI/XGXS reset\n");
/* Re-arm the XM management interrupt after a status event, but only on
 * B0 silicon outside internal loopback, only when the wireside link is
 * up (xgmii faults are expected when it is down), and only when no
 * XMAC poll is outstanding (see comment below). */
680 static void falcon_ack_status_intr(struct efx_nic *efx)
682 struct falcon_nic_data *nic_data = efx->nic_data;
685 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
688 /* We expect xgmii faults if the wireside link is down */
689 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
692 /* We can only use this interrupt to signal the negative edge of
693 * xaui_align [we have to poll the positive edge]. */
694 if (nic_data->xmac_poll_required)
697 efx_reado(efx, ®, FR_AB_XM_MGT_INT_MSK);
/* Return true if the XGXS lanes are aligned and all lanes report sync.
 * Also clears the latched comma-detect/char-error/disparity-error
 * status bits so the next read reflects fresh state. */
700 static bool falcon_xgxs_link_ok(struct efx_nic *efx)
703 bool align_done, link_ok = false;
706 /* Read link status */
707 efx_reado(efx, ®, FR_AB_XX_CORE_STAT);
709 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
710 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
711 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
714 /* Clear link status ready for next read */
715 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
716 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
717 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
718 efx_writeo(efx, ®, FR_AB_XX_CORE_STAT);
/* Combined XMAC link check: MAC-side XGXS status AND (where a PHYXS
 * MMD is present and relevant) PHY-side lane sync -- see the comment
 * below for the loopback-mode exceptions. */
723 static bool falcon_xmac_link_ok(struct efx_nic *efx)
726 * Check MAC's XGXS link status except when using XGMII loopback
727 * which bypasses the XGXS block.
728 * If possible, check PHY's XGXS link status except when using
731 return (efx->loopback_mode == LOOPBACK_XGMII ||
732 falcon_xgxs_link_ok(efx)) &&
733 (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
734 LOOPBACK_INTERNAL(efx) ||
735 efx_mdio_phyxgxs_lane_sync(efx));
/* Program the XMAC core registers: global config, TX/RX config with
 * flow-control enables derived from link_state.fc, maximum frame
 * lengths from the netdev MTU, pause parameters, and the station MAC
 * address. */
738 static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
740 unsigned int max_frame_len;
742 bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
743 bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
745 /* Configure MAC - cut-thru mode is hard wired on */
746 EFX_POPULATE_OWORD_3(reg,
747 FRF_AB_XM_RX_JUMBO_MODE, 1,
748 FRF_AB_XM_TX_STAT_EN, 1,
749 FRF_AB_XM_RX_STAT_EN, 1);
750 efx_writeo(efx, ®, FR_AB_XM_GLB_CFG);
	/* TX config: preamble, auto-pad, TX flow control */
753 EFX_POPULATE_OWORD_6(reg,
755 FRF_AB_XM_TX_PRMBL, 1,
756 FRF_AB_XM_AUTO_PAD, 1,
758 FRF_AB_XM_FCNTL, tx_fc,
760 efx_writeo(efx, ®, FR_AB_XM_TX_CFG);
	/* RX config: accept all multicast, unicast per promiscuous mode,
	 * pass frames with CRC errors up for accounting */
763 EFX_POPULATE_OWORD_5(reg,
765 FRF_AB_XM_AUTO_DEPAD, 0,
766 FRF_AB_XM_ACPT_ALL_MCAST, 1,
767 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
768 FRF_AB_XM_PASS_CRC_ERR, 1);
769 efx_writeo(efx, ®, FR_AB_XM_RX_CFG);
771 /* Set frame length */
772 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
773 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
774 efx_writeo(efx, ®, FR_AB_XM_RX_PARAM);
775 EFX_POPULATE_OWORD_2(reg,
776 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
777 FRF_AB_XM_TX_JUMBO_MODE, 1);
778 efx_writeo(efx, ®, FR_AB_XM_TX_PARAM);
	/* Pause frame parameters; honouring of received pause frames is
	 * disabled when RX flow control is off */
780 EFX_POPULATE_OWORD_2(reg,
781 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
782 FRF_AB_XM_DIS_FCNTL, !rx_fc);
783 efx_writeo(efx, ®, FR_AB_XM_FC);
785 /* Set MAC address */
786 memcpy(®, &efx->net_dev->dev_addr[0], 4);
787 efx_writeo(efx, ®, FR_AB_XM_ADR_LO);
788 memcpy(®, &efx->net_dev->dev_addr[4], 2);
789 efx_writeo(efx, ®, FR_AB_XM_ADR_HI);
/* Program the XGXS block for the current loopback mode, resetting the
 * XAUI first when moving into or out of an XGMII/XGXS/XAUI loopback
 * (flaky-silicon workaround, see comment below). */
792 static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
795 bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
796 bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
797 bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
799 /* XGXS block is flaky and will need to be reset if moving
800 * into our out of XGMII, XGXS or XAUI loopbacks. */
801 if (EFX_WORKAROUND_5147(efx)) {
802 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
	/* Read back the currently-programmed loopback enables */
805 efx_reado(efx, ®, FR_AB_XX_CORE_STAT);
806 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
808 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
810 efx_reado(efx, ®, FR_AB_XX_SD_CTL);
811 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
813 /* The PHY driver may have turned XAUI off */
814 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
815 (xaui_loopback != old_xaui_loopback) ||
816 (xgmii_loopback != old_xgmii_loopback));
819 falcon_reset_xaui(efx);
	/* Apply the new loopback configuration */
822 efx_reado(efx, ®, FR_AB_XX_CORE_STAT);
823 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
824 (xgxs_loopback || xaui_loopback) ?
825 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
826 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
827 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
828 efx_writeo(efx, ®, FR_AB_XX_CORE_STAT);
	/* XAUI loopback is enabled per lane (A-D) */
830 efx_reado(efx, ®, FR_AB_XX_SD_CTL);
831 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
832 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
833 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
834 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
835 efx_writeo(efx, ®, FR_AB_XX_SD_CTL);
839 /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
/* Retry loop around falcon_xmac_link_ok(): resets the XAUI between
 * attempts (up to @tries times), with stats DMA stopped around the
 * resets.  Returns the final link-ok state. */
840 static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
842 bool mac_up = falcon_xmac_link_ok(efx);
844 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
845 efx_phy_mode_disabled(efx->phy_mode))
846 /* XAUI link is expected to be down */
849 falcon_stop_nic_stats(efx);
851 while (!mac_up && tries) {
852 netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
853 falcon_reset_xaui(efx);
856 mac_up = falcon_xmac_link_ok(efx);
860 falcon_start_nic_stats(efx);
/* Fault-check hook: the XMAC is considered faulted if the link is
 * still not OK after up to 5 retry/reset attempts. */
865 static bool falcon_xmac_check_fault(struct efx_nic *efx)
867 return !falcon_xmac_link_ok_retry(efx, 5);
/* Full XMAC reconfiguration: XGXS core, XMAC core and MAC wrapper,
 * then re-check the link (recording whether polling is still needed)
 * and re-arm the status interrupt. */
870 static int falcon_reconfigure_xmac(struct efx_nic *efx)
872 struct falcon_nic_data *nic_data = efx->nic_data;
874 falcon_reconfigure_xgxs_core(efx);
875 falcon_reconfigure_xmac_core(efx);
877 falcon_reconfigure_mac_wrapper(efx);
879 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
880 falcon_ack_status_intr(efx);
/* Accumulate every XMAC hardware counter from the stats DMA buffer
 * into efx->mac_stats via FALCON_STAT(), then derive tx_good_bytes and
 * rx_bad_bytes from the accumulated totals. */
885 static void falcon_update_stats_xmac(struct efx_nic *efx)
887 struct efx_mac_stats *mac_stats = &efx->mac_stats;
889 /* Update MAC stats from DMAed values */
890 FALCON_STAT(efx, XgRxOctets, rx_bytes);
891 FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
892 FALCON_STAT(efx, XgRxPkts, rx_packets);
893 FALCON_STAT(efx, XgRxPktsOK, rx_good);
894 FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
895 FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
896 FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
897 FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
898 FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
899 FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
900 FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
901 FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
902 FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
903 FALCON_STAT(efx, XgRxAlignError, rx_align_error);
904 FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
905 FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
906 FALCON_STAT(efx, XgRxControlPkts, rx_control);
907 FALCON_STAT(efx, XgRxPausePkts, rx_pause);
908 FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
909 FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
910 FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
911 FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
912 FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
913 FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
914 FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
915 FALCON_STAT(efx, XgRxLengthError, rx_length_error);
916 FALCON_STAT(efx, XgTxPkts, tx_packets);
917 FALCON_STAT(efx, XgTxOctets, tx_bytes);
918 FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
919 FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
920 FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
921 FALCON_STAT(efx, XgTxControlPkts, tx_control);
922 FALCON_STAT(efx, XgTxPausePkts, tx_pause);
923 FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
924 FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
925 FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
926 FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
927 FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
928 FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
929 FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
930 FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
931 FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
932 FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
933 FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
934 FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
936 /* Update derived statistics */
937 efx_update_diff_stat(&mac_stats->tx_good_bytes,
938 mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
939 mac_stats->tx_control * 64);
940 efx_update_diff_stat(&mac_stats->rx_bad_bytes,
941 mac_stats->rx_bytes - mac_stats->rx_good_bytes -
942 mac_stats->rx_control * 64);
/* Periodic XMAC poll: while a poll is required and the link is up,
 * re-check the link (with a single retry) and re-arm the status
 * interrupt. */
945 static void falcon_poll_xmac(struct efx_nic *efx)
947 struct falcon_nic_data *nic_data = efx->nic_data;
949 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
950 !nic_data->xmac_poll_required)
953 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
954 falcon_ack_status_intr(efx);
957 /**************************************************************************
961 **************************************************************************
/* Write the cached multicast hash filter to the two hardware hash
 * registers.  Caller must hold efx->mac_lock (WARN below). */
964 static void falcon_push_multicast_hash(struct efx_nic *efx)
966 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
968 WARN_ON(!mutex_is_locked(&efx->mac_lock));
970 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
971 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
/* Reset the MACs.  Pre-B0 silicon uses the internal XM core reset
 * (GLB_CTL is not safe there); B0 drains the TX FIFO then pulses the
 * XGTX/XGRX/EM resets via GLB_CTL and polls for completion.  Restores
 * MAC_CTRL and re-runs falcon_setup_xaui() afterwards.  Caller must
 * have stats DMA disabled (WARN below). */
974 static void falcon_reset_macs(struct efx_nic *efx)
976 struct falcon_nic_data *nic_data = efx->nic_data;
977 efx_oword_t reg, mac_ctrl;
980 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
981 /* It's not safe to use GLB_CTL_REG to reset the
982 * macs, so instead use the internal MAC resets
984 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
985 efx_writeo(efx, ®, FR_AB_XM_GLB_CFG);
	/* Poll for the self-clearing core-reset bit */
987 for (count = 0; count < 10000; count++) {
988 efx_reado(efx, ®, FR_AB_XM_GLB_CFG);
989 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
995 netif_err(efx, hw, efx->net_dev,
996 "timed out waiting for XMAC core reset\n");
999 /* Mac stats will fail whist the TX fifo is draining */
1000 WARN_ON(nic_data->stats_disable_count == 0);
	/* Enable TX FIFO drain before asserting the block resets */
1002 efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL);
1003 EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1);
1004 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
1006 efx_reado(efx, ®, FR_AB_GLB_CTL);
1007 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
1008 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
1009 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
1010 efx_writeo(efx, ®, FR_AB_GLB_CTL);
	/* Poll until all three reset bits have self-cleared */
1014 efx_reado(efx, ®, FR_AB_GLB_CTL);
1015 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
1016 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
1017 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
1018 netif_dbg(efx, hw, efx->net_dev,
1019 "Completed MAC reset after %d loops\n",
1024 netif_err(efx, hw, efx->net_dev, "MAC reset failed\n");
1031 /* Ensure the correct MAC is selected before statistics
1032 * are re-enabled by the caller */
1033 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
1035 falcon_setup_xaui(efx);
/* Drain the MAC TX FIFO (B0 only, and only outside loopback) by
 * resetting the MACs -- unless the drain-enable bit is already set,
 * in which case the drain is in progress and nothing is done. */
1038 static void falcon_drain_tx_fifo(struct efx_nic *efx)
1042 if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) ||
1043 (efx->loopback_mode != LOOPBACK_NONE))
1046 efx_reado(efx, ®, FR_AB_MAC_CTRL);
1047 /* There is no point in draining more than once */
1048 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1051 falcon_reset_macs(efx);
/* Isolate the MAC from the datapath (B0 only): stop RX ingress into
 * the MAC, then drain the TX FIFO to isolate the TX side. */
1054 static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1058 if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
1061 /* Isolate the MAC -> RX */
1062 efx_reado(efx, ®, FR_AZ_RX_CFG);
1063 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1064 efx_writeo(efx, ®, FR_AZ_RX_CFG);
1066 /* Isolate TX -> MAC */
1067 falcon_drain_tx_fifo(efx);
/* Program the MAC wrapper (MAC_CTRL / RX_CFG) for the current link
 * state: encode link speed, set pause/promiscuous bits, keep the TX
 * FIFO draining while the link is down or a reset is pending (B0),
 * restore the multicast hash, and (un)isolate the MAC -> RX path. */
1070 static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1072 struct efx_link_state *link_state = &efx->link_state;
1074 int link_speed, isolate;
	/* Stay isolated while any reset is pending */
1076 isolate = !!ACCESS_ONCE(efx->reset_pending);
	/* Hardware encoding of the link speed */
1078 switch (link_state->speed) {
1079 case 10000: link_speed = 3; break;
1080 case 1000: link_speed = 2; break;
1081 case 100: link_speed = 1; break;
1082 default: link_speed = 0; break;
1084 /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
1085 * as advertised. Disable to ensure packets are not
1086 * indefinitely held and TX queue can be flushed at any point
1087 * while the link is down. */
1088 EFX_POPULATE_OWORD_5(reg,
1089 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
1090 FRF_AB_MAC_BCAD_ACPT, 1,
1091 FRF_AB_MAC_UC_PROM, efx->promiscuous,
1092 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
1093 FRF_AB_MAC_SPEED, link_speed);
1094 /* On B0, MAC backpressure can be disabled and packets get
1096 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
1097 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
1098 !link_state->up || isolate);
1101 efx_writeo(efx, ®, FR_AB_MAC_CTRL);
1103 /* Restore the multicast hash registers. */
1104 falcon_push_multicast_hash(efx);
1106 efx_reado(efx, ®, FR_AZ_RX_CFG);
1107 /* Enable XOFF signal from RX FIFO (we enabled it during NIC
1108 * initialisation but it may read back as 0) */
1109 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
1110 /* Unisolate the MAC -> RX */
1111 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1112 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate);
1113 efx_writeo(efx, ®, FR_AZ_RX_CFG);
1116 static void falcon_stats_request(struct efx_nic *efx)
1118 struct falcon_nic_data *nic_data = efx->nic_data;
1121 WARN_ON(nic_data->stats_pending);
1122 WARN_ON(nic_data->stats_disable_count);
1124 if (nic_data->stats_dma_done == NULL)
1125 return; /* no mac selected */
1127 *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
1128 nic_data->stats_pending = true;
1129 wmb(); /* ensure done flag is clear */
1131 /* Initiate DMA transfer of stats */
1132 EFX_POPULATE_OWORD_2(reg,
1133 FRF_AB_MAC_STAT_DMA_CMD, 1,
1134 FRF_AB_MAC_STAT_DMA_ADR,
1135 efx->stats_buffer.dma_addr);
1136 efx_writeo(efx, ®, FR_AB_MAC_STAT_DMA);
1138 mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2));
/* Complete an outstanding MAC statistics DMA: clear the pending flag
 * and, if the hardware set the done flag, pull the stats out of the
 * DMA buffer; otherwise log a timeout.  Callers in this file hold
 * efx->stats_lock around this call.
 */
1141 static void falcon_stats_complete(struct efx_nic *efx)
1143 struct falcon_nic_data *nic_data = efx->nic_data;
1145 if (!nic_data->stats_pending)
1148 nic_data->stats_pending = false;
1149 if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
1150 rmb(); /* read the done flag before the stats */
1151 falcon_update_stats_xmac(efx);
1153 netif_err(efx, hw, efx->net_dev,
1154 "timed out waiting for statistics\n");
/* Stats timer callback: complete any outstanding stats DMA under
 * stats_lock and, unless stats collection is disabled, request the
 * next transfer.  'context' carries the efx_nic pointer, matching
 * the setup_timer() call in falcon_probe_nic().
 */
1158 static void falcon_stats_timer_func(unsigned long context)
1160 struct efx_nic *efx = (struct efx_nic *)context;
1161 struct falcon_nic_data *nic_data = efx->nic_data;
1163 spin_lock(&efx->stats_lock);
1165 falcon_stats_complete(efx);
1166 if (nic_data->stats_disable_count == 0)
1167 falcon_stats_request(efx);
1169 spin_unlock(&efx->stats_lock);
/* Synthesise link state while in internal loopback: link reported up
 * at 10G full duplex with the wanted flow control.  Returns true if
 * this changed efx->link_state.  Caller must hold efx->mac_lock.
 */
1172 static bool falcon_loopback_link_poll(struct efx_nic *efx)
1174 struct efx_link_state old_state = efx->link_state;
1176 WARN_ON(!mutex_is_locked(&efx->mac_lock));
1177 WARN_ON(!LOOPBACK_INTERNAL(efx));
1179 efx->link_state.fd = true;
1180 efx->link_state.fc = efx->wanted_fc;
1181 efx->link_state.up = true;
1182 efx->link_state.speed = 10000;
1184 return !efx_link_state_equal(&efx->link_state, &old_state);
/* Full port reconfiguration: poll link state first (so the correct
 * MAC speed is selected), then with stats stopped tear down the MAC
 * wrapper, reset the MACs, reconfigure PHY and XMAC, restart stats
 * and push the new link state to the kernel.
 */
1187 static int falcon_reconfigure_port(struct efx_nic *efx)
1191 WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0);
1193 /* Poll the PHY link state *before* reconfiguring it. This means we
1194 * will pick up the correct speed (in loopback) to select the correct
1197 if (LOOPBACK_INTERNAL(efx))
1198 falcon_loopback_link_poll(efx);
1200 efx->phy_op->poll(efx);
1202 falcon_stop_nic_stats(efx);
1203 falcon_deconfigure_mac_wrapper(efx);
1205 falcon_reset_macs(efx);
1207 efx->phy_op->reconfigure(efx);
1208 rc = falcon_reconfigure_xmac(efx);
1211 falcon_start_nic_stats(efx);
1213 /* Synchronise efx->link_state with the kernel */
1214 efx_link_status_changed(efx);
1219 /* TX flow control may automatically turn itself off if the link
1220 * partner (intermittently) stops responding to pause frames. There
1221 isn't any indication that this has happened, so the best we can do is
1222 * leave it up to the user to spot this and fix it by cycling transmit
1223 * flow control on this end.
/* A1 silicon cannot recover TX flow control in place; schedule an
 * invisible reset instead.
 */
1226 static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
1228 /* Schedule a reset to recover */
1229 efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
/* B0 recovers TX flow control without a chip reset: with stats
 * paused, drain the TX FIFO and reconfigure the XMAC.
 */
1232 static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
1234 /* Recover by resetting the EM block */
1235 falcon_stop_nic_stats(efx);
1236 falcon_drain_tx_fifo(efx);
1237 falcon_reconfigure_xmac(efx);
1238 falcon_start_nic_stats(efx);
1241 /**************************************************************************
1243 * PHY access via GMII
1245 **************************************************************************
1248 /* Wait for GMII access to complete */
/* Polls MD_STAT until the busy bit clears; a set line-fault or
 * bus-error bit is reported as an error.  Return values on the
 * elided lines are presumably -EIO for a bus error and a timeout
 * error after 5000 iterations — confirm against the full source.
 */
1249 static int falcon_gmii_wait(struct efx_nic *efx)
1251 efx_oword_t md_stat;
1254 /* wait up to 50ms - taken max from datasheet */
1255 for (count = 0; count < 5000; count++) {
1256 efx_reado(efx, &md_stat, FR_AB_MD_STAT);
1257 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
1258 if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
1259 EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
1260 netif_err(efx, hw, efx->net_dev,
1261 "error from GMII access "
1263 EFX_OWORD_VAL(md_stat));
1270 netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n");
1274 /* Write an MDIO register of a PHY connected to Falcon. */
1275 static int falcon_mdio_write(struct net_device *net_dev,
1276 int prtad, int devad, u16 addr, u16 value)
1278 struct efx_nic *efx = netdev_priv(net_dev);
1279 struct falcon_nic_data *nic_data = efx->nic_data;
1283 netif_vdbg(efx, hw, efx->net_dev,
1284 "writing MDIO %d register %d.%d with 0x%04x\n",
1285 prtad, devad, addr, value);
1287 mutex_lock(&nic_data->mdio_lock);
1289 /* Check MDIO not currently being accessed */
1290 rc = falcon_gmii_wait(efx);
1294 /* Write the address/ID register */
1295 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
1296 efx_writeo(efx, ®, FR_AB_MD_PHY_ADR);
1298 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
1299 FRF_AB_MD_DEV_ADR, devad);
1300 efx_writeo(efx, ®, FR_AB_MD_ID);
1303 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
1304 efx_writeo(efx, ®, FR_AB_MD_TXD);
1306 EFX_POPULATE_OWORD_2(reg,
1309 efx_writeo(efx, ®, FR_AB_MD_CS);
1311 /* Wait for data to be written */
1312 rc = falcon_gmii_wait(efx);
1314 /* Abort the write operation */
1315 EFX_POPULATE_OWORD_2(reg,
1318 efx_writeo(efx, ®, FR_AB_MD_CS);
1323 mutex_unlock(&nic_data->mdio_lock);
1327 /* Read an MDIO register of a PHY connected to Falcon. */
1328 static int falcon_mdio_read(struct net_device *net_dev,
1329 int prtad, int devad, u16 addr)
1331 struct efx_nic *efx = netdev_priv(net_dev);
1332 struct falcon_nic_data *nic_data = efx->nic_data;
1336 mutex_lock(&nic_data->mdio_lock);
1338 /* Check MDIO not currently being accessed */
1339 rc = falcon_gmii_wait(efx);
1343 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
1344 efx_writeo(efx, ®, FR_AB_MD_PHY_ADR);
1346 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
1347 FRF_AB_MD_DEV_ADR, devad);
1348 efx_writeo(efx, ®, FR_AB_MD_ID);
1350 /* Request data to be read */
1351 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
1352 efx_writeo(efx, ®, FR_AB_MD_CS);
1354 /* Wait for data to become available */
1355 rc = falcon_gmii_wait(efx);
1357 efx_reado(efx, ®, FR_AB_MD_RXD);
1358 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
1359 netif_vdbg(efx, hw, efx->net_dev,
1360 "read from MDIO %d register %d.%d, got %04x\n",
1361 prtad, devad, addr, rc);
1363 /* Abort the read operation */
1364 EFX_POPULATE_OWORD_2(reg,
1367 efx_writeo(efx, ®, FR_AB_MD_CS);
1369 netif_dbg(efx, hw, efx->net_dev,
1370 "read from MDIO %d register %d.%d, got error %d\n",
1371 prtad, devad, addr, rc);
1375 mutex_unlock(&nic_data->mdio_lock);
1379 /* This call is responsible for hooking in the MAC and PHY operations */
/* Selects efx->phy_op from the PHY type, wires up the MDIO
 * accessors, sets initial link/flow-control assumptions, and
 * allocates the DMA buffer used for MAC statistics.
 */
1380 static int falcon_probe_port(struct efx_nic *efx)
1382 struct falcon_nic_data *nic_data = efx->nic_data;
1385 switch (efx->phy_type) {
1386 case PHY_TYPE_SFX7101:
1387 efx->phy_op = &falcon_sfx7101_phy_ops;
1389 case PHY_TYPE_QT2022C2:
1390 case PHY_TYPE_QT2025C:
1391 efx->phy_op = &falcon_qt202x_phy_ops;
1393 case PHY_TYPE_TXC43128:
1394 efx->phy_op = &falcon_txc_phy_ops;
1397 netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n",
1402 /* Fill out MDIO structure and loopback modes */
1403 mutex_init(&nic_data->mdio_lock);
1404 efx->mdio.mdio_read = falcon_mdio_read;
1405 efx->mdio.mdio_write = falcon_mdio_write;
1406 rc = efx->phy_op->probe(efx);
1410 /* Initial assumption */
1411 efx->link_state.speed = 10000;
1412 efx->link_state.fd = true;
1414 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
1415 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
1416 efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
1418 efx->wanted_fc = EFX_FC_RX;
1419 if (efx->mdio.mmds & MDIO_DEVS_AN)
1420 efx->wanted_fc |= EFX_FC_AUTO;
1422 /* Allocate buffer for stats */
1423 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
1424 FALCON_MAC_STATS_SIZE, GFP_KERNEL)
1427 netif_dbg(efx, probe, efx->net_dev,
1428 "stats buffer at %llx (virt %p phys %llx)\n",
1429 (u64)efx->stats_buffer.dma_addr,
1430 efx->stats_buffer.addr,
1431 (u64)virt_to_phys(efx->stats_buffer.addr));
1432 nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
/* Undo falcon_probe_port(): remove the PHY and free the stats DMA buffer. */
1437 static void falcon_remove_port(struct efx_nic *efx)
1439 efx->phy_op->remove(efx);
1440 efx_nic_free_buffer(efx, &efx->stats_buffer);
1443 /* Global events are basically PHY events */
/* Handles a global event on this channel: PHY interrupts are
 * swallowed, a B0 XGMAC management interrupt flags the XMAC for
 * polling, and an RX recovery event schedules a reset (or disables
 * the NIC where workaround 6555 does not apply).  Presumably returns
 * true when the event was consumed — the return lines are elided.
 */
1445 falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
1447 struct efx_nic *efx = channel->efx;
1448 struct falcon_nic_data *nic_data = efx->nic_data;
1450 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
1451 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
1452 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
1456 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
1457 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
1458 nic_data->xmac_poll_required = true;
1462 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
1463 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
1464 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
1465 netif_err(efx, rx_err, efx->net_dev,
1466 "channel %d seen global RX_RESET event. Resetting.\n",
1469 atomic_inc(&efx->rx_reset);
1470 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
1471 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
1478 /**************************************************************************
1482 **************************************************************************/
/* Read and validate the NVRAM configuration area from SPI flash or
 * EEPROM: check the board magic number and structure version, verify
 * the 16-bit checksum (all words must sum to 0xffff), then copy the
 * config to nvconfig_out when it is non-NULL (NULL = validate only,
 * as used by falcon_test_nvram()).
 */
1485 falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
1487 struct falcon_nic_data *nic_data = efx->nic_data;
1488 struct falcon_nvconfig *nvconfig;
1489 struct efx_spi_device *spi;
1491 int rc, magic_num, struct_ver;
1492 __le16 *word, *limit;
/* Prefer flash; fall back to EEPROM when no flash is present. */
1495 if (efx_spi_present(&nic_data->spi_flash))
1496 spi = &nic_data->spi_flash;
1497 else if (efx_spi_present(&nic_data->spi_eeprom))
1498 spi = &nic_data->spi_eeprom;
1502 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
1505 nvconfig = region + FALCON_NVCONFIG_OFFSET;
/* spi_lock serialises access to the SPI devices. */
1507 mutex_lock(&nic_data->spi_lock);
1508 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
1509 mutex_unlock(&nic_data->spi_lock);
1511 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
1512 efx_spi_present(&nic_data->spi_flash) ?
1513 "flash" : "EEPROM");
1518 magic_num = le16_to_cpu(nvconfig->board_magic_num);
1519 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
1522 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
1523 netif_err(efx, hw, efx->net_dev,
1524 "NVRAM bad magic 0x%x\n", magic_num);
1527 if (struct_ver < 2) {
1528 netif_err(efx, hw, efx->net_dev,
1529 "NVRAM has ancient version 0x%x\n", struct_ver);
/* Versions 2-3 checksum only the nvconfig struct; v4+ checksums the
 * whole region (the v4+ start pointer falls on an elided line).
 */
1531 } else if (struct_ver < 4) {
1532 word = &nvconfig->board_magic_num;
1533 limit = (__le16 *) (nvconfig + 1);
1536 limit = region + FALCON_NVCONFIG_END;
1538 for (csum = 0; word < limit; ++word)
1539 csum += le16_to_cpu(*word);
1541 if (~csum & 0xffff) {
1542 netif_err(efx, hw, efx->net_dev,
1543 "NVRAM has incorrect checksum\n");
1549 memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig));
/* Self-test hook: validate the NVRAM without copying its contents out. */
1556 static int falcon_test_nvram(struct efx_nic *efx)
1558 return falcon_read_nvram(efx, NULL);
1561 static const struct efx_farch_register_test falcon_b0_register_tests[] = {
/* Register/mask pairs for the B0 register self-test; each mask marks
 * the bits the test may safely flip and read back.  Several entry
 * names fall on elided lines.
 */
1563 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
1565 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
1567 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
1568 { FR_AZ_TX_RESERVED,
1569 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
1571 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
1572 { FR_AZ_SRM_TX_DC_CFG,
1573 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
1575 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
1576 { FR_AZ_RX_DC_PF_WM,
1577 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
1579 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
1581 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
1583 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
1585 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
1587 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
1589 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
1590 { FR_AB_XM_RX_PARAM,
1591 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
1593 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
1595 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
1597 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
/* B0 chip self-test: move into an internal loopback (the XMAC
 * registers need the "312 clock" from the PHY, so XGMII loopback is
 * preferred), bring the port down, run the register tests, then
 * reset the hardware and bring everything back up.  Returns the
 * first failure from the reset, else the result of coming back up.
 */
1601 falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
1603 enum reset_type reset_method = RESET_TYPE_INVISIBLE;
1606 mutex_lock(&efx->mac_lock);
1607 if (efx->loopback_modes) {
1608 /* We need the 312 clock from the PHY to test the XMAC
1609 * registers, so move into XGMII loopback if available */
1610 if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
1611 efx->loopback_mode = LOOPBACK_XGMII;
1613 efx->loopback_mode = __ffs(efx->loopback_modes);
1615 __efx_reconfigure_port(efx);
1616 mutex_unlock(&efx->mac_lock);
1618 efx_reset_down(efx, reset_method);
1621 efx_farch_test_registers(efx, falcon_b0_register_tests,
1622 ARRAY_SIZE(falcon_b0_register_tests))
1625 rc = falcon_reset_hw(efx, reset_method);
1626 rc2 = efx_reset_up(efx, reset_method, rc == 0);
1627 return rc ? rc : rc2;
1630 /**************************************************************************
1634 **************************************************************************
/* Map a scheduled-reset reason onto the cheapest adequate reset
 * type: recoverable datapath faults get an invisible reset, anything
 * else gets a full reset.
 */
1637 static enum reset_type falcon_map_reset_reason(enum reset_type reason)
1640 case RESET_TYPE_RX_RECOVERY:
1641 case RESET_TYPE_RX_DESC_FETCH:
1642 case RESET_TYPE_TX_DESC_FETCH:
1643 case RESET_TYPE_TX_SKIP:
1644 /* These can occasionally occur due to hardware bugs.
1645 * We try to reset without disrupting the link.
1647 return RESET_TYPE_INVISIBLE;
1649 return RESET_TYPE_ALL;
/* Translate ethtool ETH_RESET_* flag bits into a falcon reset type,
 * clearing from *flags the bits the chosen reset will satisfy.  The
 * checks go from widest (WORLD) to narrowest (INVISIBLE); the
 * fallthrough on an elided line presumably returns an error when no
 * supported combination matches — confirm against the full source.
 */
1653 static int falcon_map_reset_flags(u32 *flags)
1656 FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER |
1657 ETH_RESET_OFFLOAD | ETH_RESET_MAC),
1658 FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY,
1659 FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ,
1662 if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) {
1663 *flags &= ~FALCON_RESET_WORLD;
1664 return RESET_TYPE_WORLD;
1667 if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) {
1668 *flags &= ~FALCON_RESET_ALL;
1669 return RESET_TYPE_ALL;
1672 if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) {
1673 *flags &= ~FALCON_RESET_INVISIBLE;
1674 return RESET_TYPE_INVISIBLE;
1680 /* Resets NIC to known state. This routine must be called in process
1681 * context and is allowed to sleep. */
/* For RESET_TYPE_WORLD the PCI config space of both functions is
 * saved first (the reset clobbers it) and restored afterwards; an
 * INVISIBLE reset excludes the PHY, EEPROM/flash and PCIe core from
 * the reset mask.  After waiting ~50ms, SWRST is checked to confirm
 * the reset completed.
 */
1682 static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1684 struct falcon_nic_data *nic_data = efx->nic_data;
1685 efx_oword_t glb_ctl_reg_ker;
1688 netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n",
1689 RESET_TYPE(method));
1691 /* Initiate device reset */
1692 if (method == RESET_TYPE_WORLD) {
1693 rc = pci_save_state(efx->pci_dev);
1695 netif_err(efx, drv, efx->net_dev,
1696 "failed to backup PCI state of primary "
1697 "function prior to hardware reset\n");
1700 if (efx_nic_is_dual_func(efx)) {
1701 rc = pci_save_state(nic_data->pci_dev2);
1703 netif_err(efx, drv, efx->net_dev,
1704 "failed to backup PCI state of "
1705 "secondary function prior to "
1706 "hardware reset\n");
1711 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
1712 FRF_AB_EXT_PHY_RST_DUR,
1713 FFE_AB_EXT_PHY_RST_DUR_10240US,
1716 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
1717 /* exclude PHY from "invisible" reset */
1718 FRF_AB_EXT_PHY_RST_CTL,
1719 method == RESET_TYPE_INVISIBLE,
1720 /* exclude EEPROM/flash and PCIe */
1721 FRF_AB_PCIE_CORE_RST_CTL, 1,
1722 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
1723 FRF_AB_PCIE_SD_RST_CTL, 1,
1724 FRF_AB_EE_RST_CTL, 1,
1725 FRF_AB_EXT_PHY_RST_DUR,
1726 FFE_AB_EXT_PHY_RST_DUR_10240US,
1729 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1731 netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n");
1732 schedule_timeout_uninterruptible(HZ / 20);
1734 /* Restore PCI configuration if needed */
1735 if (method == RESET_TYPE_WORLD) {
1736 if (efx_nic_is_dual_func(efx))
1737 pci_restore_state(nic_data->pci_dev2);
1738 pci_restore_state(efx->pci_dev);
1739 netif_dbg(efx, drv, efx->net_dev,
1740 "successfully restored PCI config\n");
1743 /* Assert that reset complete */
1744 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
1745 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
1747 netif_err(efx, hw, efx->net_dev,
1748 "timed out waiting for hardware reset\n");
1751 netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n");
1755 /* pci_save_state() and pci_restore_state() MUST be called in pairs */
1757 pci_restore_state(efx->pci_dev);
/* Serialised wrapper around __falcon_reset_hw(): holds spi_lock so a
 * hardware reset cannot race with SPI flash/EEPROM accesses.
 */
1763 static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1765 struct falcon_nic_data *nic_data = efx->nic_data;
1768 mutex_lock(&nic_data->spi_lock);
1769 rc = __falcon_reset_hw(efx, method);
1770 mutex_unlock(&nic_data->spi_lock);
/* Periodic hardware monitor, called with mac_lock held: checks the
 * board sensors (putting the PHY into low-power mode on a fault or
 * monitor failure), polls link state, reconfigures the MAC wrapper
 * and XMAC when the link changes, and finally polls the XMAC (the
 * guard condition for that poll is on an elided line).
 */
1775 static void falcon_monitor(struct efx_nic *efx)
1780 BUG_ON(!mutex_is_locked(&efx->mac_lock));
1782 rc = falcon_board(efx)->type->monitor(efx);
1784 netif_err(efx, hw, efx->net_dev,
1785 "Board sensor %s; shutting down PHY\n",
1786 (rc == -ERANGE) ? "reported fault" : "failed");
1787 efx->phy_mode |= PHY_MODE_LOW_POWER;
1788 rc = __efx_reconfigure_port(efx);
1792 if (LOOPBACK_INTERNAL(efx))
1793 link_changed = falcon_loopback_link_poll(efx);
1795 link_changed = efx->phy_op->poll(efx);
1798 falcon_stop_nic_stats(efx);
1799 falcon_deconfigure_mac_wrapper(efx);
1801 falcon_reset_macs(efx);
1802 rc = falcon_reconfigure_xmac(efx);
1805 falcon_start_nic_stats(efx);
1807 efx_link_status_changed(efx);
1810 falcon_poll_xmac(efx);
1813 /* Zeroes out the SRAM contents. This routine must be called in
1814 * process context and is allowed to sleep.
/* Wakes the SRAM via GPIO1, starts the hardware SRAM-init engine,
 * then polls SRM_INIT_EN until the hardware clears it (expected in
 * ~16ms; gives up after ~0.4s).
 */
1816 static int falcon_reset_sram(struct efx_nic *efx)
1818 efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
1821 /* Set the SRAM wake/sleep GPIO appropriately. */
1822 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1823 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
1824 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
1825 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
1827 /* Initiate SRAM reset */
1828 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
1829 FRF_AZ_SRM_INIT_EN, 1,
1830 FRF_AZ_SRM_NB_SZ, 0);
1831 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1833 /* Wait for SRAM reset to complete */
1836 netif_dbg(efx, hw, efx->net_dev,
1837 "waiting for SRAM reset (attempt %d)...\n", count);
1839 /* SRAM reset is slow; expect around 16ms */
1840 schedule_timeout_uninterruptible(HZ / 50);
1842 /* Check for reset complete */
1843 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
1844 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
1845 netif_dbg(efx, hw, efx->net_dev,
1846 "SRAM reset complete\n");
1850 } while (++count < 20); /* wait up to 0.4 sec */
1852 netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n");
/* Decode a packed SPI device-type word into the efx_spi_device
 * fields (address length, erase command/size, block size; sizes are
 * powers of two extracted via SPI_DEV_TYPE_FIELD).  A device_type of
 * 0 marks the device as absent (size = 0).  The 'munge_address'
 * quirk applies to 512-byte devices with 1-byte addressing.
 */
1856 static void falcon_spi_device_init(struct efx_nic *efx,
1857 struct efx_spi_device *spi_device,
1858 unsigned int device_id, u32 device_type)
1860 if (device_type != 0) {
1861 spi_device->device_id = device_id;
1863 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
1864 spi_device->addr_len =
1865 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN);
1866 spi_device->munge_address = (spi_device->size == 1 << 9 &&
1867 spi_device->addr_len == 1);
1868 spi_device->erase_command =
1869 SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD);
1870 spi_device->erase_size =
1871 1 << SPI_DEV_TYPE_FIELD(device_type,
1872 SPI_DEV_TYPE_ERASE_SIZE);
1873 spi_device->block_size =
1874 1 << SPI_DEV_TYPE_FIELD(device_type,
1875 SPI_DEV_TYPE_BLOCK_SIZE);
1877 spi_device->size = 0;
1881 /* Extract non-volatile configuration */
/* Reads the validated NVRAM config, then derives the PHY type/MDIO
 * address, SPI device parameters (struct version >= 3), the
 * permanent MAC address, and the board revision used to probe the
 * board support code.
 */
1882 static int falcon_probe_nvconfig(struct efx_nic *efx)
1884 struct falcon_nic_data *nic_data = efx->nic_data;
1885 struct falcon_nvconfig *nvconfig;
1888 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
1892 rc = falcon_read_nvram(efx, nvconfig);
1896 efx->phy_type = nvconfig->board_v2.port0_phy_type;
1897 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
1899 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1900 falcon_spi_device_init(
1901 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1902 le32_to_cpu(nvconfig->board_v3
1903 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
1904 falcon_spi_device_init(
1905 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1906 le32_to_cpu(nvconfig->board_v3
1907 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
1910 /* Read the MAC addresses */
1911 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
1913 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1914 efx->phy_type, efx->mdio.prtad);
1916 rc = falcon_probe_board(efx,
1917 le16_to_cpu(nvconfig->board_v2.board_revision));
/* Fixed SRAM base addresses for the RX and TX descriptor caches. */
1923 static void falcon_dimension_resources(struct efx_nic *efx)
1925 efx->rx_dc_base = 0x20000;
1926 efx->tx_dc_base = 0x26000;
1929 /* Probe all SPI devices on the NIC */
/* Determines which SPI device (flash or EEPROM) the NIC booted from
 * via GPIO3/SF_PRST straps.  When booted from internal ASIC settings
 * instead, VPD is disabled and safe SPI clock dividers are set.  The
 * boot device is then registered with default parameters (the
 * default EEPROM type constant falls on an elided line).
 */
1930 static void falcon_probe_spi_devices(struct efx_nic *efx)
1932 struct falcon_nic_data *nic_data = efx->nic_data;
1933 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1936 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
1937 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
1938 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1940 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
1941 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
1942 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
1943 netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n",
1944 boot_dev == FFE_AB_SPI_DEVICE_FLASH ?
1945 "flash" : "EEPROM");
1947 /* Disable VPD and set clock dividers to safe
1948 * values for initial programming. */
1950 netif_dbg(efx, probe, efx->net_dev,
1951 "Booted from internal ASIC settings;"
1952 " setting SPI config\n");
1953 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
1954 /* 125 MHz / 7 ~= 20 MHz */
1955 FRF_AB_EE_SF_CLOCK_DIV, 7,
1956 /* 125 MHz / 63 ~= 2 MHz */
1957 FRF_AB_EE_EE_CLOCK_DIV, 63);
1958 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1961 mutex_init(&nic_data->spi_lock);
1963 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1964 falcon_spi_device_init(efx, &nic_data->spi_flash,
1965 FFE_AB_SPI_DEVICE_FLASH,
1966 default_flash_type);
1967 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1968 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
1969 FFE_AB_SPI_DEVICE_EEPROM,
/* BAR mapping size for A1 silicon; the returned constant is on an
 * elided line — confirm against the full source.
 */
1973 static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
/* BAR mapping size for B0 silicon. */
1978 static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
1980 /* Map everything up to and including the RSS indirection table.
1981 * The PCI core takes care of mapping the MSI-X tables.
1983 return FR_BZ_RX_INDIRECTION_TBL +
1984 FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
/* Main NIC probe: allocate per-NIC state, reject unsupported
 * hardware (FPGA builds, rev A0, A1 1G/PCI-X), locate the secondary
 * PCI function on dual-function A1 boards, reset the chip, set up
 * the interrupt status buffer, read NVRAM config, register the I2C
 * bus and board support, and arm the stats machinery.  The tail
 * (after the elided error labels) unwinds each step in reverse.
 */
1987 static int falcon_probe_nic(struct efx_nic *efx)
1989 struct falcon_nic_data *nic_data;
1990 struct falcon_board *board;
1993 /* Allocate storage for hardware specific data */
1994 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
1997 efx->nic_data = nic_data;
2001 if (efx_farch_fpga_ver(efx) != 0) {
2002 netif_err(efx, probe, efx->net_dev,
2003 "Falcon FPGA not supported\n");
2007 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2008 efx_oword_t nic_stat;
2009 struct pci_dev *dev;
2010 u8 pci_rev = efx->pci_dev->revision;
2012 if ((pci_rev == 0xff) || (pci_rev == 0)) {
2013 netif_err(efx, probe, efx->net_dev,
2014 "Falcon rev A0 not supported\n");
2017 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2018 if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) {
2019 netif_err(efx, probe, efx->net_dev,
2020 "Falcon rev A1 1G not supported\n");
2023 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2024 netif_err(efx, probe, efx->net_dev,
2025 "Falcon rev A1 PCI-X not supported\n");
/* A1 is a dual-function device: find the sibling function at
 * devfn + 1 on the same bus.
 */
2029 dev = pci_dev_get(efx->pci_dev);
2030 while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE,
2031 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1,
2033 if (dev->bus == efx->pci_dev->bus &&
2034 dev->devfn == efx->pci_dev->devfn + 1) {
2035 nic_data->pci_dev2 = dev;
2039 if (!nic_data->pci_dev2) {
2040 netif_err(efx, probe, efx->net_dev,
2041 "failed to find secondary function\n");
2047 /* Now we can reset the NIC */
2048 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
2050 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
2054 /* Allocate memory for INT_KER */
2055 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
2059 BUG_ON(efx->irq_status.dma_addr & 0x0f);
2061 netif_dbg(efx, probe, efx->net_dev,
2062 "INT_KER at %llx (virt %p phys %llx)\n",
2063 (u64)efx->irq_status.dma_addr,
2064 efx->irq_status.addr,
2065 (u64)virt_to_phys(efx->irq_status.addr));
2067 falcon_probe_spi_devices(efx);
2069 /* Read in the non-volatile configuration */
2070 rc = falcon_probe_nvconfig(efx);
2073 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
2077 efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
2079 efx->timer_quantum_ns = 4968; /* 621 cycles */
2081 /* Initialise I2C adapter */
2082 board = falcon_board(efx);
2083 board->i2c_adap.owner = THIS_MODULE;
2084 board->i2c_data = falcon_i2c_bit_operations;
2085 board->i2c_data.data = efx;
2086 board->i2c_adap.algo_data = &board->i2c_data;
2087 board->i2c_adap.dev.parent = &efx->pci_dev->dev;
2088 strlcpy(board->i2c_adap.name, "SFC4000 GPIO",
2089 sizeof(board->i2c_adap.name));
2090 rc = i2c_bit_add_bus(&board->i2c_adap);
2094 rc = falcon_board(efx)->type->init(efx);
2096 netif_err(efx, probe, efx->net_dev,
2097 "failed to initialise board\n");
/* Stats start disabled; falcon_start_nic_stats() enables them. */
2101 nic_data->stats_disable_count = 1;
2102 setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func,
2103 (unsigned long)efx);
/* Error unwind: undo I2C, INT_KER buffer, secondary function and
 * nic_data allocation in reverse order (labels elided).
 */
2108 i2c_del_adapter(&board->i2c_adap);
2109 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2111 efx_nic_free_buffer(efx, &efx->irq_status);
2114 if (nic_data->pci_dev2) {
2115 pci_dev_put(nic_data->pci_dev2);
2116 nic_data->pci_dev2 = NULL;
2120 kfree(efx->nic_data);
2124 static void falcon_init_rx_cfg(struct efx_nic *efx)
2126 /* RX control FIFO thresholds (32 entries) */
2127 const unsigned ctrl_xon_thr = 20;
2128 const unsigned ctrl_xoff_thr = 25;
2131 efx_reado(efx, ®, FR_AZ_RX_CFG);
2132 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
2133 /* Data FIFO size is 5.5K. The RX DMA engine only
2134 * supports scattering for user-mode queues, but will
2135 * split DMA writes at intervals of RX_USR_BUF_SIZE
2136 * (32-byte units) even for kernel-mode queues. We
2137 * set it to be so large that that never happens.
2139 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2140 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2142 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
2143 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
2144 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2145 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2147 /* Data FIFO size is 80K; register fields moved */
2148 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2149 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
2150 EFX_RX_USR_BUF_SIZE >> 5);
2151 /* Send XON and XOFF at ~3 * max MTU away from empty/full */
2152 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
2153 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
2154 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2155 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2156 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2158 /* Enable hash insertion. This is broken for the
2159 * 'Falcon' hash so also select Toeplitz TCP/IPv4 and
2161 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1);
2162 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1);
2163 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1);
2165 /* Always enable XOFF signal from RX FIFO. We enable
2166 * or disable transmission of pause frames at the MAC. */
2167 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1);
2168 efx_writeo(efx, ®, FR_AZ_RX_CFG);
2171 /* This call performs hardware-specific global initialisation, such as
2172 * defining the descriptor cache sizes and number of RSS channels.
2173 * It does not set up any buffers, descriptor rings or event queues.
/* Sequence: select on-chip SRAM and reset it, apply errata
 * workarounds (TX data FIFO parity, filter search limits), disable
 * broken RX descriptor-wait behaviour, configure RX_CFG, then (B0+)
 * the RSS hash key and flush-event destination, and finally the
 * common farch initialisation.
 */
2175 static int falcon_init_nic(struct efx_nic *efx)
2180 /* Use on-chip SRAM */
2181 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2182 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2183 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2185 rc = falcon_reset_sram(efx);
2189 /* Clear the parity enables on the TX data fifos as
2190 * they produce false parity errors because of timing issues
2192 if (EFX_WORKAROUND_5129(efx)) {
2193 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2194 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2195 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2198 if (EFX_WORKAROUND_7244(efx)) {
2199 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
2200 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2201 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2202 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2203 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
2204 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
2207 /* XXX This is documented only for Falcon A0/A1 */
2208 /* Setup RX. Wait for descriptor is broken and must
2209 * be disabled. RXDP recovery shouldn't be needed, but is.
2211 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
2212 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
2213 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
2214 if (EFX_WORKAROUND_5583(efx))
2215 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
2216 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
2218 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
2219 * descriptors (which is bad).
2221 efx_reado(efx, &temp, FR_AZ_TX_CFG);
2222 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
2223 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
2225 falcon_init_rx_cfg(efx);
2227 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
2228 /* Set hash key for IPv4 */
2229 memcpy(&temp, efx->rx_hash_key, sizeof(temp));
2230 efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
2232 /* Set destination of both TX and RX Flush events */
2233 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
2234 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
2237 efx_farch_init_common(efx);
/* Undo falcon_probe_nic(): board teardown, I2C removal, INT_KER
 * buffer release, final hardware reset, release of the secondary PCI
 * function, and freeing of the private state.
 */
2242 static void falcon_remove_nic(struct efx_nic *efx)
2244 struct falcon_nic_data *nic_data = efx->nic_data;
2245 struct falcon_board *board = falcon_board(efx);
2247 board->type->fini(efx);
2249 /* Remove I2C adapter and clear it in preparation for a retry */
2250 i2c_del_adapter(&board->i2c_adap);
2251 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
2253 efx_nic_free_buffer(efx, &efx->irq_status);
2255 __falcon_reset_hw(efx, RESET_TYPE_ALL);
2257 /* Release the second function after the reset */
2258 if (nic_data->pci_dev2) {
2259 pci_dev_put(nic_data->pci_dev2);
2260 nic_data->pci_dev2 = NULL;
2263 /* Tear down the private nic state */
2264 kfree(efx->nic_data);
2265 efx->nic_data = NULL;
/* Opportunistic stats refresh: accumulate the RX no-descriptor drop
 * counter and, if an outstanding stats DMA has completed, pull the
 * MAC stats from the DMA buffer.  Skipped entirely while stats are
 * disabled.
 */
2268 static void falcon_update_nic_stats(struct efx_nic *efx)
2270 struct falcon_nic_data *nic_data = efx->nic_data;
2273 if (nic_data->stats_disable_count)
2276 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
2277 efx->n_rx_nodesc_drop_cnt +=
2278 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
2280 if (nic_data->stats_pending &&
2281 *nic_data->stats_dma_done == FALCON_STATS_DONE) {
2282 nic_data->stats_pending = false;
2283 rmb(); /* read the done flag before the stats */
2284 falcon_update_stats_xmac(efx);
2288 void falcon_start_nic_stats(struct efx_nic *efx)
2290 struct falcon_nic_data *nic_data = efx->nic_data;
2292 spin_lock_bh(&efx->stats_lock);
2293 if (--nic_data->stats_disable_count == 0)
2294 falcon_stats_request(efx);
2295 spin_unlock_bh(&efx->stats_lock);
2298 void falcon_stop_nic_stats(struct efx_nic *efx)
2300 struct falcon_nic_data *nic_data = efx->nic_data;
2305 spin_lock_bh(&efx->stats_lock);
2306 ++nic_data->stats_disable_count;
2307 spin_unlock_bh(&efx->stats_lock);
2309 del_timer_sync(&nic_data->stats_timer);
2311 /* Wait enough time for the most recent transfer to
2313 for (i = 0; i < 4 && nic_data->stats_pending; i++) {
2314 if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
2319 spin_lock_bh(&efx->stats_lock);
2320 falcon_stats_complete(efx);
2321 spin_unlock_bh(&efx->stats_lock);
2324 static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
2326 falcon_board(efx)->type->set_id_led(efx, mode);
/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */
2336 static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
2340 memset(&wol->sopass, 0, sizeof(wol->sopass));
2343 static int falcon_set_wol(struct efx_nic *efx, u32 type)
/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */
2357 const struct efx_nic_type falcon_a1_nic_type = {
2358 .mem_map_size = falcon_a1_mem_map_size,
2359 .probe = falcon_probe_nic,
2360 .remove = falcon_remove_nic,
2361 .init = falcon_init_nic,
2362 .dimension_resources = falcon_dimension_resources,
2363 .fini = falcon_irq_ack_a1,
2364 .monitor = falcon_monitor,
2365 .map_reset_reason = falcon_map_reset_reason,
2366 .map_reset_flags = falcon_map_reset_flags,
2367 .reset = falcon_reset_hw,
2368 .probe_port = falcon_probe_port,
2369 .remove_port = falcon_remove_port,
2370 .handle_global_event = falcon_handle_global_event,
2371 .fini_dmaq = efx_farch_fini_dmaq,
2372 .prepare_flush = falcon_prepare_flush,
2373 .finish_flush = efx_port_dummy_op_void,
2374 .update_stats = falcon_update_nic_stats,
2375 .start_stats = falcon_start_nic_stats,
2376 .stop_stats = falcon_stop_nic_stats,
2377 .set_id_led = falcon_set_id_led,
2378 .push_irq_moderation = falcon_push_irq_moderation,
2379 .reconfigure_port = falcon_reconfigure_port,
2380 .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
2381 .reconfigure_mac = falcon_reconfigure_xmac,
2382 .check_mac_fault = falcon_xmac_check_fault,
2383 .get_wol = falcon_get_wol,
2384 .set_wol = falcon_set_wol,
2385 .resume_wol = efx_port_dummy_op_void,
2386 .test_nvram = falcon_test_nvram,
2387 .irq_enable_master = efx_farch_irq_enable_master,
2388 .irq_test_generate = efx_farch_irq_test_generate,
2389 .irq_disable_non_ev = efx_farch_irq_disable_master,
2390 .irq_handle_msi = efx_farch_msi_interrupt,
2391 .irq_handle_legacy = falcon_legacy_interrupt_a1,
2392 .tx_probe = efx_farch_tx_probe,
2393 .tx_init = efx_farch_tx_init,
2394 .tx_remove = efx_farch_tx_remove,
2395 .tx_write = efx_farch_tx_write,
2396 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2397 .rx_probe = efx_farch_rx_probe,
2398 .rx_init = efx_farch_rx_init,
2399 .rx_remove = efx_farch_rx_remove,
2400 .rx_write = efx_farch_rx_write,
2401 .rx_defer_refill = efx_farch_rx_defer_refill,
2402 .ev_probe = efx_farch_ev_probe,
2403 .ev_init = efx_farch_ev_init,
2404 .ev_fini = efx_farch_ev_fini,
2405 .ev_remove = efx_farch_ev_remove,
2406 .ev_process = efx_farch_ev_process,
2407 .ev_read_ack = efx_farch_ev_read_ack,
2408 .ev_test_generate = efx_farch_ev_test_generate,
2410 .revision = EFX_REV_FALCON_A1,
2411 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
2412 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
2413 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
2414 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
2415 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
2416 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
2417 .rx_buffer_padding = 0x24,
2418 .can_rx_scatter = false,
2419 .max_interrupt_mode = EFX_INT_MODE_MSI,
2420 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
2421 .offload_features = NETIF_F_IP_CSUM,
2425 const struct efx_nic_type falcon_b0_nic_type = {
2426 .mem_map_size = falcon_b0_mem_map_size,
2427 .probe = falcon_probe_nic,
2428 .remove = falcon_remove_nic,
2429 .init = falcon_init_nic,
2430 .dimension_resources = falcon_dimension_resources,
2431 .fini = efx_port_dummy_op_void,
2432 .monitor = falcon_monitor,
2433 .map_reset_reason = falcon_map_reset_reason,
2434 .map_reset_flags = falcon_map_reset_flags,
2435 .reset = falcon_reset_hw,
2436 .probe_port = falcon_probe_port,
2437 .remove_port = falcon_remove_port,
2438 .handle_global_event = falcon_handle_global_event,
2439 .fini_dmaq = efx_farch_fini_dmaq,
2440 .prepare_flush = falcon_prepare_flush,
2441 .finish_flush = efx_port_dummy_op_void,
2442 .update_stats = falcon_update_nic_stats,
2443 .start_stats = falcon_start_nic_stats,
2444 .stop_stats = falcon_stop_nic_stats,
2445 .set_id_led = falcon_set_id_led,
2446 .push_irq_moderation = falcon_push_irq_moderation,
2447 .reconfigure_port = falcon_reconfigure_port,
2448 .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
2449 .reconfigure_mac = falcon_reconfigure_xmac,
2450 .check_mac_fault = falcon_xmac_check_fault,
2451 .get_wol = falcon_get_wol,
2452 .set_wol = falcon_set_wol,
2453 .resume_wol = efx_port_dummy_op_void,
2454 .test_chip = falcon_b0_test_chip,
2455 .test_nvram = falcon_test_nvram,
2456 .irq_enable_master = efx_farch_irq_enable_master,
2457 .irq_test_generate = efx_farch_irq_test_generate,
2458 .irq_disable_non_ev = efx_farch_irq_disable_master,
2459 .irq_handle_msi = efx_farch_msi_interrupt,
2460 .irq_handle_legacy = efx_farch_legacy_interrupt,
2461 .tx_probe = efx_farch_tx_probe,
2462 .tx_init = efx_farch_tx_init,
2463 .tx_remove = efx_farch_tx_remove,
2464 .tx_write = efx_farch_tx_write,
2465 .rx_push_indir_table = efx_farch_rx_push_indir_table,
2466 .rx_probe = efx_farch_rx_probe,
2467 .rx_init = efx_farch_rx_init,
2468 .rx_remove = efx_farch_rx_remove,
2469 .rx_write = efx_farch_rx_write,
2470 .rx_defer_refill = efx_farch_rx_defer_refill,
2471 .ev_probe = efx_farch_ev_probe,
2472 .ev_init = efx_farch_ev_init,
2473 .ev_fini = efx_farch_ev_fini,
2474 .ev_remove = efx_farch_ev_remove,
2475 .ev_process = efx_farch_ev_process,
2476 .ev_read_ack = efx_farch_ev_read_ack,
2477 .ev_test_generate = efx_farch_ev_test_generate,
2479 .revision = EFX_REV_FALCON_B0,
2480 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
2481 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
2482 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
2483 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
2484 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
2485 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
2486 .rx_buffer_hash_size = 0x10,
2487 .rx_buffer_padding = 0,
2488 .can_rx_scatter = true,
2489 .max_interrupt_mode = EFX_INT_MODE_MSIX,
2490 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
2491 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,