/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2011 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0	0
#define BAR_2	2

#include "tg3.h"

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			117
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"January 25, 2011"

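/* Worked example (illustrative, not driver code): with TG3_MAJ_NUM 3 and
 * TG3_MIN_NUM 117, __stringify() turns the two tokens into the adjacent
 * string literals "3" "." "117", which the compiler concatenates, so
 * DRV_MODULE_VERSION is the single string "3.117".
 */
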
#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
	 RX_STD_MAX_SIZE_5717 : 512)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || \
	  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) ? \
	 1024 : 256)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))

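/* Illustrative sketch (not driver code): because TG3_TX_RING_SIZE is a
 * power of two, the masked form used by NEXT_TX() is equivalent to a
 * modulo but never needs a hardware divide:
 *
 *	u32 a = (n + 1) % TG3_TX_RING_SIZE;		// may emit a divide
 *	u32 b = (n + 1) & (TG3_TX_RING_SIZE - 1);	// always a single AND
 *
 * Both produce the same wrap-around for any n, which is exactly the
 * '% foo' vs '& (foo - 1)' trade-off the comment above describes.
 */
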
#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

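/* Illustrative sketch (assumed receive-path usage, not a quote of the
 * actual rx handler): for a received length "len", the threshold picks
 * between the copy path and the zero-copy path roughly as follows:
 *
 *	if (len < TG3_RX_COPY_THRESH(tp)) {
 *		// allocate a small skb and copy the data into it; on
 *		// 5701/PCIX parts this copy also hides the dword
 *		// alignment DMA bug described above
 *	} else {
 *		// hand the original DMA buffer up the stack
 *	}
 */
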
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
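
/* Worked example (not driver code): with the default tx_pending of
 * TG3_DEF_TX_RING_PENDING (511), the stopped queue is woken once
 * 511 / 4 = 127 descriptors have been freed by tx completion.
 */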

#define TG3_RAW_IP_ALIGN 2

/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

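/* Usage note (example, not driver code): tg3_debug is the standard
 * NETIF_MSG_* bitmap, so e.g. "modprobe tg3 tg3_debug=0x3" requests
 * NETIF_MSG_DRV | NETIF_MSG_PROBE, while the default of -1 falls back
 * to TG3_DEF_MSG_ENABLE.
 */
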
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[TG3_NUM_STATS] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" }
};

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[TG3_NUM_TEST] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
	    (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
	    !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
		writel(val, mbox);
	if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)

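/* Illustrative sketch (not driver code): with the indirection macros
 * above, a register write that must settle for 40 usec before the
 * device is touched again is simply
 *
 *	tw32_wait_f(TG3PCI_CLOCK_CTRL, val, 40);
 *
 * which expands to _tw32_flush(tp, TG3PCI_CLOCK_CTRL, val, 40) and
 * picks the posted or non-posted method at run time.
 */
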
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = 0; i < 8; i++)
		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
}

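/* Illustrative sketch (assumed caller pattern, not a quote of driver
 * code): accesses to resources shared with the APE firmware bracket the
 * critical section with the lock pair above:
 *
 *	if (!tg3_ape_lock(tp, TG3_APE_LOCK_MEM)) {
 *		// ... touch the shared resource ...
 *		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *	}
 */
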
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tp->tg3_flags &
	      (TG3_FLAG_USE_LINKCHG_REG |
	       TG3_FLAG_POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}
	/* check for RX/TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
	    tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (tg3_readphy(tp, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (tg3_writephy(tp, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
	    (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthrough */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
		tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}

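/* Worked example (not driver code): if 1000 usec of the 2500 usec
 * window remain, delay_cnt = (1000 >> 3) + 1 = 126, so the loop polls
 * at most 126 times at 8 usec each -- roughly the time actually left,
 * never the full timeout.
 */
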
/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");
		tg3_ump_link_report(tp);
	}
}

static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_PAUSE_CAP;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_PAUSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & ADVERTISE_1000XPAUSE) {
		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
			else if (rmtadv & LPA_1000XPAUSE_ASYM)
				cap = FLOW_CTRL_RX;
		} else {
			if (rmtadv & LPA_1000XPAUSE)
				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
		}
	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
			cap = FLOW_CTRL_TX;
	}

	return cap;
}

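/* Illustrative summary (IEEE 802.3 Annex 28B pause resolution, given
 * here for reference): the function above reduces to this truth table,
 * where PAUSE/ASYM are the local bits and LP_* the link partner's:
 *
 *	PAUSE=1,          LP_PAUSE=1             ->  TX + RX
 *	PAUSE=1, ASYM=1,  LP_PAUSE=0, LP_ASYM=1  ->  RX only
 *	PAUSE=0, ASYM=1,  LP_PAUSE=1, LP_ASYM=1  ->  TX only
 *	anything else                            ->  none
 */
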
static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE &&
	    (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.orig_speed;
		phydev->duplex = tp->link_config.orig_duplex;
		phydev->autoneg = tp->link_config.orig_autoneg;
		phydev->advertising = tp->link_config.orig_advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}

static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
		      MII_TG3_AUXCTL_SHDWSEL_MISC;
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			phy |= MII_TG3_AUXCTL_MISC_WREN;
			tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
		}
	}
}

static void tg3_phy_set_wirespeed(struct tg3 *tp)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
		return;

	if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
	    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
		tg3_writephy(tp, MII_TG3_AUX_CTRL,
			     (val | (1 << 15) | (1 << 4)));
}

static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* Enable SM_DSP clock and tx 6dB coding. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);

	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	/* Turn off SM_DSP clock. */
	phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
	      MII_TG3_AUXCTL_ACTL_TX_6DB;
	tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
}

static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		switch (val) {
		case TG3_CL45_D7_EEERES_STAT_LP_1000T:
			switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
			case ASIC_REV_5717:
			case ASIC_REV_5719:
			case ASIC_REV_57765:
				/* Enable SM_DSP clock and tx 6dB coding. */
				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
				      MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
				      MII_TG3_AUXCTL_ACTL_TX_6DB;
				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);

				tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);

				/* Turn off SM_DSP clock. */
				val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
				      MII_TG3_AUXCTL_ACTL_TX_6DB;
				tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
			}
			/* Fallthrough */
		case TG3_CL45_D7_EEERES_STAT_LP_100TX:
			tp->setlpicnt = 2;
		}
	}

	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}

static int tg3_wait_macro_done(struct tg3 *tp)
{
	int limit = 100;

	while (limit--) {
		u32 tmp32;

		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
			if ((tmp32 & 0x1000) == 0)
				break;
		}
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}

1898 for (chan = 0; chan < 4; chan++) {
1901 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1902 (chan * 0x2000) | 0x0200);
1903 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
1904 for (i = 0; i < 6; i++)
1905 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1906 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
1907 if (tg3_wait_macro_done(tp))
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		/* Enable SM_DSP_CLOCK and 6dB.  */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* Set Extended packet length bit for jumbo frames */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
	} else {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}

	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}

/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	if (tp->phy_flags & TG3_PHYFLG_ADC_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}
	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_phydsp_write(tp, 0x000a, 0x310b);
		tg3_phydsp_write(tp, 0x201f, 0x9506);
		tg3_phydsp_write(tp, 0x401f, 0x14e2);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
		if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
			tg3_writephy(tp, MII_TG3_TEST1,
				     MII_TG3_TEST1_TRIM_EN | 0x4);
		} else
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
	}
	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
	} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
		    !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
			tg3_writephy(tp, MII_TG3_AUX_CTRL, val | 0x4000);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}

2121 static void tg3_frob_aux_power(struct tg3 *tp)
2123 struct tg3 *tp_peer = tp;
2125 /* The GPIOs do something completely different on 57765. */
2126 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2127 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2128 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2131 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2132 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2133 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2134 struct net_device *dev_peer;
2136 dev_peer = pci_get_drvdata(tp->pdev_peer);
2137 /* remove_one() may have been run on the peer. */
2141 tp_peer = netdev_priv(dev_peer);
2144 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2145 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2146 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2147 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2150 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2151 (GRC_LCLCTRL_GPIO_OE0 |
2152 GRC_LCLCTRL_GPIO_OE1 |
2153 GRC_LCLCTRL_GPIO_OE2 |
2154 GRC_LCLCTRL_GPIO_OUTPUT0 |
2155 GRC_LCLCTRL_GPIO_OUTPUT1),
2157 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2158 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2159 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2160 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2161 GRC_LCLCTRL_GPIO_OE1 |
2162 GRC_LCLCTRL_GPIO_OE2 |
2163 GRC_LCLCTRL_GPIO_OUTPUT0 |
2164 GRC_LCLCTRL_GPIO_OUTPUT1 |
2166 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2168 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2169 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2171 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2172 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2175 u32 grc_local_ctrl = 0;
2177 if (tp_peer != tp &&
2178 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2181 /* Workaround to prevent overdrawing Amps. */
2182 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2183 ASIC_REV_5714) {
2184 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2185 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2186 grc_local_ctrl, 100);
2187 }
2189 /* On 5753 and variants, GPIO2 cannot be used. */
2190 no_gpio2 = tp->nic_sram_data_cfg &
2191 NIC_SRAM_DATA_CFG_NO_GPIO2;
2193 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2194 GRC_LCLCTRL_GPIO_OE1 |
2195 GRC_LCLCTRL_GPIO_OE2 |
2196 GRC_LCLCTRL_GPIO_OUTPUT1 |
2197 GRC_LCLCTRL_GPIO_OUTPUT2;
2198 if (no_gpio2) {
2199 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2200 GRC_LCLCTRL_GPIO_OUTPUT2);
2201 }
2202 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2203 grc_local_ctrl, 100);
2203 grc_local_ctrl, 100);
2205 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2207 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2208 grc_local_ctrl, 100);
2210 if (!no_gpio2) {
2211 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2212 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2213 grc_local_ctrl, 100);
2214 }
2217 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2218 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2219 if (tp_peer != tp &&
2220 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2221 return;
2223 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2224 (GRC_LCLCTRL_GPIO_OE1 |
2225 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2227 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2228 GRC_LCLCTRL_GPIO_OE1, 100);
2230 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2231 (GRC_LCLCTRL_GPIO_OE1 |
2232 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2233 }
2234 }
2235 }
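/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given
 * link speed.  The sense depends on the LED mode and on whether a
 * BCM5411 PHY is fitted, hence the three cases below.
 */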
2237 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2239 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2240 return 1;
2241 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2242 if (speed != SPEED_10)
2243 return 1;
2244 } else if (speed == SPEED_10)
2245 return 1;
2247 return 0;
2248 }
2250 static int tg3_setup_phy(struct tg3 *, int);
2252 #define RESET_KIND_SHUTDOWN 0
2253 #define RESET_KIND_INIT 1
2254 #define RESET_KIND_SUSPEND 2
2256 static void tg3_write_sig_post_reset(struct tg3 *, int);
2257 static int tg3_halt_cpu(struct tg3 *, u32);
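/* Power down the PHY as far as the hardware allows.  SerDes devices,
 * the 5906's embedded PHY and FET-style PHYs each need their own
 * quiesce sequence before (or instead of) the generic BMCR_PDOWN
 * write at the end of this function.
 */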
2259 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2263 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2265 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2266 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2268 sg_dig_ctrl |=
2269 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2270 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2271 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2272 }
2273 return;
2274 }
2276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2278 val = tr32(GRC_MISC_CFG);
2279 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2280 udelay(40);
2281 return;
2282 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2284 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2287 tg3_writephy(tp, MII_ADVERTISE, 0);
2288 tg3_writephy(tp, MII_BMCR,
2289 BMCR_ANENABLE | BMCR_ANRESTART);
2291 tg3_writephy(tp, MII_TG3_FET_TEST,
2292 phytest | MII_TG3_FET_SHADOW_EN);
2293 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2294 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2295 tg3_writephy(tp,
2296 MII_TG3_FET_SHDW_AUXMODE4,
2297 phy);
2298 }
2299 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2300 }
2301 return;
2302 } else if (do_low_power) {
2303 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2304 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2306 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2307 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2308 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2309 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2310 MII_TG3_AUXCTL_PCTL_VREG_11V);
2311 }
2313 /* The PHY should not be powered down on some chips because
2314 * of bugs.
2315 */
2316 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2317 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2318 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2319 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2320 return;
2322 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2323 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2324 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2325 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2326 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2327 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2330 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2331 }
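/* NVRAM arbitration: the SWARB register implements a request/grant
 * handshake so that the driver, the firmware and (on two-port parts)
 * the peer function never drive the serial flash at the same time.
 * tg3_nvram_lock() below busy-polls the grant bit for up to
 * 8000 * 20 usec (160 ms) and keeps a recursion count so that nested
 * callers unlock in the right order.
 */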
2333 /* tp->lock is held. */
2334 static int tg3_nvram_lock(struct tg3 *tp)
2336 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2337 int i;
2339 if (tp->nvram_lock_cnt == 0) {
2340 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2341 for (i = 0; i < 8000; i++) {
2342 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2343 break;
2344 udelay(20);
2345 }
2346 if (i == 8000) {
2347 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2348 return -ENODEV;
2349 }
2350 }
2351 tp->nvram_lock_cnt++;
2352 }
2353 return 0;
2354 }
2356 /* tp->lock is held. */
2357 static void tg3_nvram_unlock(struct tg3 *tp)
2359 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2360 if (tp->nvram_lock_cnt > 0)
2361 tp->nvram_lock_cnt--;
2362 if (tp->nvram_lock_cnt == 0)
2363 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2367 /* tp->lock is held. */
2368 static void tg3_enable_nvram_access(struct tg3 *tp)
2370 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2371 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2372 u32 nvaccess = tr32(NVRAM_ACCESS);
2374 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2378 /* tp->lock is held. */
2379 static void tg3_disable_nvram_access(struct tg3 *tp)
2381 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2382 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2383 u32 nvaccess = tr32(NVRAM_ACCESS);
2385 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2389 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2390 u32 offset, u32 *val)
2395 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2396 return -EINVAL;
2398 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2399 EEPROM_ADDR_DEVID_MASK |
2400 EEPROM_ADDR_START);
2401 tw32(GRC_EEPROM_ADDR,
2402 tmp |
2403 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2404 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2405 EEPROM_ADDR_ADDR_MASK) |
2406 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2408 for (i = 0; i < 1000; i++) {
2409 tmp = tr32(GRC_EEPROM_ADDR);
2411 if (tmp & EEPROM_ADDR_COMPLETE)
2412 break;
2413 msleep(1);
2414 }
2415 if (!(tmp & EEPROM_ADDR_COMPLETE))
2416 return -EBUSY;
2418 tmp = tr32(GRC_EEPROM_DATA);
2420 /*
2421 * The data will always be opposite the native endian
2422 * format.  Perform a blind byteswap to compensate.
2423 */
2424 *val = swab32(tmp);
2426 return 0;
2427 }
2429 #define NVRAM_CMD_TIMEOUT 10000
2431 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2435 tw32(NVRAM_CMD, nvram_cmd);
2436 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2437 udelay(10);
2438 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2439 udelay(10);
2440 break;
2441 }
2442 }
2444 if (i == NVRAM_CMD_TIMEOUT)
2445 return -EBUSY;
2447 return 0;
2448 }
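/* Atmel AT45DB flash uses power-of-two page addressing with a
 * non-power-of-two page size, so linear offsets must be translated.
 * Worked example, assuming the 264-byte page size of these parts:
 * linear addr 600 sits in page 600 / 264 = 2 at offset 600 % 264 = 72,
 * giving physical address (2 << 9) + 72 = 1096 (the page position,
 * ATMEL_AT45DB0X1B_PAGE_POS, is 9).  tg3_nvram_logical_addr() performs
 * the inverse mapping.
 */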
2450 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2452 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2453 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2454 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2455 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2456 (tp->nvram_jedecnum == JEDEC_ATMEL))
2458 addr = ((addr / tp->nvram_pagesize) <<
2459 ATMEL_AT45DB0X1B_PAGE_POS) +
2460 (addr % tp->nvram_pagesize);
2462 return addr;
2463 }
2465 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2467 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2468 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2469 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2470 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2471 (tp->nvram_jedecnum == JEDEC_ATMEL))
2473 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2474 tp->nvram_pagesize) +
2475 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2477 return addr;
2478 }
2480 /* NOTE: Data read in from NVRAM is byteswapped according to
2481 * the byteswapping settings for all other register accesses.
2482 * tg3 devices are BE devices, so on a BE machine, the data
2483 * returned will be exactly as it is seen in NVRAM. On a LE
2484 * machine, the 32-bit value will be byteswapped.
2486 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2490 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2491 return tg3_nvram_read_using_eeprom(tp, offset, val);
2493 offset = tg3_nvram_phys_addr(tp, offset);
2495 if (offset > NVRAM_ADDR_MSK)
2496 return -EINVAL;
2498 ret = tg3_nvram_lock(tp);
2499 if (ret)
2500 return ret;
2502 tg3_enable_nvram_access(tp);
2504 tw32(NVRAM_ADDR, offset);
2505 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2506 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2508 if (ret == 0)
2509 *val = tr32(NVRAM_RDDATA);
2511 tg3_disable_nvram_access(tp);
2513 tg3_nvram_unlock(tp);
2515 return ret;
2516 }
2518 /* Ensures NVRAM data is in bytestream format. */
2519 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2522 int res = tg3_nvram_read(tp, offset, &v);
2523 if (!res)
2524 *val = cpu_to_be32(v);
2525 return res;
2526 }
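/* The MAC address registers hold the station address as two words:
 * the high word carries bytes 0-1 and the low word bytes 2-5.  For
 * example, 00:10:18:aa:bb:cc is written as addr_high = 0x0010 and
 * addr_low = 0x18aabbcc.  The address is replicated into all four
 * MAC_ADDR slots (and the twelve extended slots on 5703/5704), and
 * the byte sum seeds the transmit backoff generator.
 */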
2528 /* tp->lock is held. */
2529 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2531 u32 addr_high, addr_low;
2534 addr_high = ((tp->dev->dev_addr[0] << 8) |
2535 tp->dev->dev_addr[1]);
2536 addr_low = ((tp->dev->dev_addr[2] << 24) |
2537 (tp->dev->dev_addr[3] << 16) |
2538 (tp->dev->dev_addr[4] << 8) |
2539 (tp->dev->dev_addr[5] << 0));
2540 for (i = 0; i < 4; i++) {
2541 if (i == 1 && skip_mac_1)
2542 continue;
2543 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2544 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2549 for (i = 0; i < 12; i++) {
2550 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2551 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2555 addr_high = (tp->dev->dev_addr[0] +
2556 tp->dev->dev_addr[1] +
2557 tp->dev->dev_addr[2] +
2558 tp->dev->dev_addr[3] +
2559 tp->dev->dev_addr[4] +
2560 tp->dev->dev_addr[5]) &
2561 TX_BACKOFF_SEED_MASK;
2562 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2565 static void tg3_enable_register_access(struct tg3 *tp)
2567 /*
2568 * Make sure register accesses (indirect or otherwise) will function
2569 * correctly.
2570 */
2571 pci_write_config_dword(tp->pdev,
2572 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2575 static int tg3_power_up(struct tg3 *tp)
2577 tg3_enable_register_access(tp);
2579 pci_set_power_state(tp->pdev, PCI_D0);
2581 /* Switch out of Vaux if it is a NIC */
2582 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2583 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2585 return 0;
2586 }
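/* Prepare the chip for low-power entry: restore CLKREQ, mask PCI
 * interrupts, park the PHY at a WOL-capable speed, program the magic
 * packet and WOL mailbox state, gate the core clocks, and finally
 * switch the GPIO power rails via tg3_frob_aux_power().
 */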
2588 static int tg3_power_down_prepare(struct tg3 *tp)
2591 bool device_should_wake, do_low_power;
2593 tg3_enable_register_access(tp);
2595 /* Restore the CLKREQ setting. */
2596 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2597 u16 lnkctl;
2599 pci_read_config_word(tp->pdev,
2600 tp->pcie_cap + PCI_EXP_LNKCTL,
2601 &lnkctl);
2602 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2603 pci_write_config_word(tp->pdev,
2604 tp->pcie_cap + PCI_EXP_LNKCTL,
2605 lnkctl);
2606 }
2608 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2609 tw32(TG3PCI_MISC_HOST_CTRL,
2610 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2612 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2613 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2615 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2616 do_low_power = false;
2617 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
2618 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2619 struct phy_device *phydev;
2620 u32 phyid, advertising;
2622 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2624 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2626 tp->link_config.orig_speed = phydev->speed;
2627 tp->link_config.orig_duplex = phydev->duplex;
2628 tp->link_config.orig_autoneg = phydev->autoneg;
2629 tp->link_config.orig_advertising = phydev->advertising;
2631 advertising = ADVERTISED_TP |
2632 ADVERTISED_Pause |
2633 ADVERTISED_Autoneg |
2634 ADVERTISED_10baseT_Half;
2636 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2637 device_should_wake) {
2638 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2639 advertising |=
2640 ADVERTISED_100baseT_Half |
2641 ADVERTISED_100baseT_Full |
2642 ADVERTISED_10baseT_Full;
2643 else
2644 advertising |= ADVERTISED_10baseT_Full;
2647 phydev->advertising = advertising;
2649 phy_start_aneg(phydev);
2651 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2652 if (phyid != PHY_ID_BCMAC131) {
2653 phyid &= PHY_BCM_OUI_MASK;
2654 if (phyid == PHY_BCM_OUI_1 ||
2655 phyid == PHY_BCM_OUI_2 ||
2656 phyid == PHY_BCM_OUI_3)
2657 do_low_power = true;
2658 }
2659 }
2660 } else {
2661 do_low_power = true;
2663 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
2664 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
2665 tp->link_config.orig_speed = tp->link_config.speed;
2666 tp->link_config.orig_duplex = tp->link_config.duplex;
2667 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2670 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
2671 tp->link_config.speed = SPEED_10;
2672 tp->link_config.duplex = DUPLEX_HALF;
2673 tp->link_config.autoneg = AUTONEG_ENABLE;
2674 tg3_setup_phy(tp, 0);
2678 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2681 val = tr32(GRC_VCPU_EXT_CTRL);
2682 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2683 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2687 for (i = 0; i < 200; i++) {
2688 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2689 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2690 break;
2691 msleep(1);
2692 }
2693 }
2694 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2695 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2696 WOL_DRV_STATE_SHUTDOWN |
2697 WOL_DRV_WOL |
2698 WOL_SET_MAGIC_PKT);
2700 if (device_should_wake) {
2701 u32 mac_mode;
2703 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
2704 if (do_low_power) {
2705 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2706 udelay(40);
2707 }
2709 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2710 mac_mode = MAC_MODE_PORT_MODE_GMII;
2711 else
2712 mac_mode = MAC_MODE_PORT_MODE_MII;
2714 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2715 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2716 ASIC_REV_5700) {
2717 u32 speed = (tp->tg3_flags &
2718 TG3_FLAG_WOL_SPEED_100MB) ?
2719 SPEED_100 : SPEED_10;
2720 if (tg3_5700_link_polarity(tp, speed))
2721 mac_mode |= MAC_MODE_LINK_POLARITY;
2722 else
2723 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2724 }
2725 } else {
2726 mac_mode = MAC_MODE_PORT_MODE_TBI;
2727 }
2729 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2730 tw32(MAC_LED_CTRL, tp->led_ctrl);
2732 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2733 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2734 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2735 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2736 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2737 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2739 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2740 mac_mode |= MAC_MODE_APE_TX_EN |
2741 MAC_MODE_APE_RX_EN |
2742 MAC_MODE_TDE_ENABLE;
2744 tw32_f(MAC_MODE, mac_mode);
2747 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2751 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2752 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2756 base_val = tp->pci_clock_ctrl;
2757 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2758 CLOCK_CTRL_TXCLK_DISABLE);
2760 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2761 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2762 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2763 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2764 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2765 /* do nothing */
2766 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2767 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2768 u32 newbits1, newbits2;
2770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2771 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2772 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2773 CLOCK_CTRL_TXCLK_DISABLE |
2774 CLOCK_CTRL_ALTCLK);
2775 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2776 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2777 newbits1 = CLOCK_CTRL_625_CORE;
2778 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2779 } else {
2780 newbits1 = CLOCK_CTRL_ALTCLK;
2781 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2784 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2785 40);
2787 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2788 40);
2790 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2793 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2794 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2795 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2796 CLOCK_CTRL_TXCLK_DISABLE |
2797 CLOCK_CTRL_44MHZ_CORE);
2798 } else {
2799 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2800 }
2802 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2803 tp->pci_clock_ctrl | newbits3, 40);
2807 if (!(device_should_wake) &&
2808 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2809 tg3_power_down_phy(tp, do_low_power);
2811 tg3_frob_aux_power(tp);
2813 /* Workaround for unstable PLL clock */
2814 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2815 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2816 u32 val = tr32(0x7d00);
2818 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2819 tw32(0x7d00, val);
2820 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2823 err = tg3_nvram_lock(tp);
2824 tg3_halt_cpu(tp, RX_CPU_BASE);
2825 if (!err)
2826 tg3_nvram_unlock(tp);
2827 }
2828 }
2830 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2832 return 0;
2833 }
2835 static void tg3_power_down(struct tg3 *tp)
2837 tg3_power_down_prepare(tp);
2839 pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2840 pci_set_power_state(tp->pdev, PCI_D3hot);
2841 }
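/* Decode the speed/duplex field of the PHY aux status register.  The
 * six defined codes map 10/100/1000 by half/full directly; FET PHYs
 * instead report discrete speed and duplex bits, handled in the
 * default case below.
 */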
2843 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2845 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2846 case MII_TG3_AUX_STAT_10HALF:
2847 *speed = SPEED_10;
2848 *duplex = DUPLEX_HALF;
2849 break;
2851 case MII_TG3_AUX_STAT_10FULL:
2852 *speed = SPEED_10;
2853 *duplex = DUPLEX_FULL;
2854 break;
2856 case MII_TG3_AUX_STAT_100HALF:
2857 *speed = SPEED_100;
2858 *duplex = DUPLEX_HALF;
2859 break;
2861 case MII_TG3_AUX_STAT_100FULL:
2862 *speed = SPEED_100;
2863 *duplex = DUPLEX_FULL;
2864 break;
2866 case MII_TG3_AUX_STAT_1000HALF:
2867 *speed = SPEED_1000;
2868 *duplex = DUPLEX_HALF;
2869 break;
2871 case MII_TG3_AUX_STAT_1000FULL:
2872 *speed = SPEED_1000;
2873 *duplex = DUPLEX_FULL;
2874 break;
2876 default:
2877 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2878 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2879 SPEED_10;
2880 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2881 DUPLEX_HALF;
2882 break;
2883 }
2884 *speed = SPEED_INVALID;
2885 *duplex = DUPLEX_INVALID;
2886 break;
2887 }
2888 }
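/* Program the advertisement registers and (re)start autonegotiation
 * on a copper PHY.  Three cases: low-power mode (advertise 10Mb,
 * plus 100Mb when WOL at 100Mb is enabled), normal autoneg built
 * from link_config.advertising, and a forced speed/duplex, which is
 * verified with a brief loopback link-down wait before the final
 * BMCR write.
 */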
2890 static void tg3_phy_copper_begin(struct tg3 *tp)
2895 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2896 /* Entering low power mode. Disable gigabit and
2897 * 100baseT advertisements.
2899 tg3_writephy(tp, MII_TG3_CTRL, 0);
2901 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2902 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2903 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2904 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2906 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2907 } else if (tp->link_config.speed == SPEED_INVALID) {
2908 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
2909 tp->link_config.advertising &=
2910 ~(ADVERTISED_1000baseT_Half |
2911 ADVERTISED_1000baseT_Full);
2913 new_adv = ADVERTISE_CSMA;
2914 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2915 new_adv |= ADVERTISE_10HALF;
2916 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2917 new_adv |= ADVERTISE_10FULL;
2918 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2919 new_adv |= ADVERTISE_100HALF;
2920 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2921 new_adv |= ADVERTISE_100FULL;
2923 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2925 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2927 if (tp->link_config.advertising &
2928 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2929 new_adv = 0;
2930 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2931 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2932 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2933 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2934 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY) &&
2935 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2936 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2937 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2938 MII_TG3_CTRL_ENABLE_AS_MASTER);
2939 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2940 } else {
2941 tg3_writephy(tp, MII_TG3_CTRL, 0);
2942 }
2943 } else {
2944 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2945 new_adv |= ADVERTISE_CSMA;
2947 /* Asking for a specific link mode. */
2948 if (tp->link_config.speed == SPEED_1000) {
2949 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2951 if (tp->link_config.duplex == DUPLEX_FULL)
2952 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2953 else
2954 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2955 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2956 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2957 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2958 MII_TG3_CTRL_ENABLE_AS_MASTER);
2959 } else {
2960 if (tp->link_config.speed == SPEED_100) {
2961 if (tp->link_config.duplex == DUPLEX_FULL)
2962 new_adv |= ADVERTISE_100FULL;
2963 else
2964 new_adv |= ADVERTISE_100HALF;
2965 } else {
2966 if (tp->link_config.duplex == DUPLEX_FULL)
2967 new_adv |= ADVERTISE_10FULL;
2968 else
2969 new_adv |= ADVERTISE_10HALF;
2970 }
2971 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2973 new_adv = 0;
2974 }
2976 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2977 }
2979 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2982 tw32(TG3_CPMU_EEE_MODE,
2983 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2985 /* Enable SM_DSP clock and tx 6dB coding. */
2986 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
2987 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
2988 MII_TG3_AUXCTL_ACTL_TX_6DB;
2989 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
2991 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
2992 case ASIC_REV_5717:
2993 case ASIC_REV_57765:
2994 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
2995 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
2996 MII_TG3_DSP_CH34TP2_HIBW01);
2997 /* Fall through */
2998 case ASIC_REV_5719:
2999 val = MII_TG3_DSP_TAP26_ALNOKO |
3000 MII_TG3_DSP_TAP26_RMRXSTO |
3001 MII_TG3_DSP_TAP26_OPCSINPT;
3002 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3003 }
3005 val = 0;
3006 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3007 /* Advertise 100-BaseTX EEE ability */
3008 if (tp->link_config.advertising &
3009 ADVERTISED_100baseT_Full)
3010 val |= MDIO_AN_EEE_ADV_100TX;
3011 /* Advertise 1000-BaseT EEE ability */
3012 if (tp->link_config.advertising &
3013 ADVERTISED_1000baseT_Full)
3014 val |= MDIO_AN_EEE_ADV_1000T;
3016 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3018 /* Turn off SM_DSP clock. */
3019 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
3020 MII_TG3_AUXCTL_ACTL_TX_6DB;
3021 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3024 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
3025 tp->link_config.speed != SPEED_INVALID) {
3026 u32 bmcr, orig_bmcr;
3028 tp->link_config.active_speed = tp->link_config.speed;
3029 tp->link_config.active_duplex = tp->link_config.duplex;
3031 bmcr = 0;
3032 switch (tp->link_config.speed) {
3033 default:
3034 case SPEED_10:
3035 break;
3037 case SPEED_100:
3038 bmcr |= BMCR_SPEED100;
3039 break;
3041 case SPEED_1000:
3042 bmcr |= TG3_BMCR_SPEED1000;
3043 break;
3044 }
3046 if (tp->link_config.duplex == DUPLEX_FULL)
3047 bmcr |= BMCR_FULLDPLX;
3049 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3050 (bmcr != orig_bmcr)) {
3051 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3052 for (i = 0; i < 1500; i++) {
3053 u32 tmp;
3055 udelay(10);
3056 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3057 tg3_readphy(tp, MII_BMSR, &tmp))
3058 continue;
3059 if (!(tmp & BMSR_LSTATUS)) {
3060 udelay(40);
3061 break;
3062 }
3063 }
3064 tg3_writephy(tp, MII_BMCR, bmcr);
3065 udelay(40);
3066 }
3067 } else {
3068 tg3_writephy(tp, MII_BMCR,
3069 BMCR_ANENABLE | BMCR_ANRESTART);
3073 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3077 /* Turn off tap power management. */
3078 /* Set Extended packet length bit */
3079 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
3081 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3082 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3083 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3084 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3085 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3087 udelay(40);
3089 return err;
3090 }
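/* Check that the PHY advertisement registers cover every mode in
 * "mask": returns 1 only when both MII_ADVERTISE and (for gigabit
 * capable PHYs) MII_TG3_CTRL advertise at least the requested modes.
 */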
3092 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3094 u32 adv_reg, all_mask = 0;
3096 if (mask & ADVERTISED_10baseT_Half)
3097 all_mask |= ADVERTISE_10HALF;
3098 if (mask & ADVERTISED_10baseT_Full)
3099 all_mask |= ADVERTISE_10FULL;
3100 if (mask & ADVERTISED_100baseT_Half)
3101 all_mask |= ADVERTISE_100HALF;
3102 if (mask & ADVERTISED_100baseT_Full)
3103 all_mask |= ADVERTISE_100FULL;
3105 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3106 return 0;
3108 if ((adv_reg & all_mask) != all_mask)
3109 return 0;
3110 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3111 u32 tg3_ctrl;
3113 all_mask = 0;
3114 if (mask & ADVERTISED_1000baseT_Half)
3115 all_mask |= ADVERTISE_1000HALF;
3116 if (mask & ADVERTISED_1000baseT_Full)
3117 all_mask |= ADVERTISE_1000FULL;
3119 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3120 return 0;
3122 if ((tg3_ctrl & all_mask) != all_mask)
3123 return 0;
3124 }
3125 return 1;
3126 }
3128 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3132 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3133 return 1;
3135 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3136 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3138 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3139 if (curadv != reqadv)
3140 return 0;
3142 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3143 tg3_readphy(tp, MII_LPA, rmtadv);
3144 } else {
3145 /* Reprogram the advertisement register, even if it
3146 * does not affect the current link. If the link
3147 * gets renegotiated in the future, we can save an
3148 * additional renegotiation cycle by advertising
3149 * it correctly in the first place.
3151 if (curadv != reqadv) {
3152 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3153 ADVERTISE_PAUSE_ASYM);
3154 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3155 }
3156 }
3158 return 1;
3159 }
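/* Bring up (or re-verify) the link on a copper PHY: apply per-chip
 * PHY workarounds, wait for BMSR and aux status to settle, derive
 * speed/duplex and flow control, then program MAC_MODE and the
 * CLKREQ workaround to match the negotiated link.
 */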
3161 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3163 int current_link_up;
3164 u32 bmsr, val;
3165 u32 lcl_adv, rmt_adv;
3166 u16 current_speed;
3167 u8 current_duplex;
3168 int i, err;
3170 tw32(MAC_EVENT, 0);
3172 tw32_f(MAC_STATUS,
3173 (MAC_STATUS_SYNC_CHANGED |
3174 MAC_STATUS_CFG_CHANGED |
3175 MAC_STATUS_MI_COMPLETION |
3176 MAC_STATUS_LNKSTATE_CHANGED));
3177 udelay(40);
3179 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3180 tw32_f(MAC_MI_MODE,
3181 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3182 udelay(80);
3183 }
3185 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3187 /* Some third-party PHYs need to be reset on link going
3188 * down.
3189 */
3190 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3191 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3192 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3193 netif_carrier_ok(tp->dev)) {
3194 tg3_readphy(tp, MII_BMSR, &bmsr);
3195 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3196 !(bmsr & BMSR_LSTATUS))
3197 force_reset = 1;
3198 }
3199 if (force_reset)
3200 tg3_phy_reset(tp);
3202 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3203 tg3_readphy(tp, MII_BMSR, &bmsr);
3204 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3205 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3208 if (!(bmsr & BMSR_LSTATUS)) {
3209 err = tg3_init_5401phy_dsp(tp);
3210 if (err)
3211 return err;
3213 tg3_readphy(tp, MII_BMSR, &bmsr);
3214 for (i = 0; i < 1000; i++) {
3215 udelay(10);
3216 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3217 (bmsr & BMSR_LSTATUS)) {
3218 udelay(40);
3219 break;
3220 }
3221 }
3223 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3224 TG3_PHY_REV_BCM5401_B0 &&
3225 !(bmsr & BMSR_LSTATUS) &&
3226 tp->link_config.active_speed == SPEED_1000) {
3227 err = tg3_phy_reset(tp);
3228 if (!err)
3229 err = tg3_init_5401phy_dsp(tp);
3230 if (err)
3231 return err;
3232 }
3233 }
3234 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3235 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3236 /* 5701 {A0,B0} CRC bug workaround */
3237 tg3_writephy(tp, 0x15, 0x0a75);
3238 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3239 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
3240 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
3243 /* Clear pending interrupts... */
3244 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3245 tg3_readphy(tp, MII_TG3_ISTAT, &val);
3247 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
3248 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3249 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
3250 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3253 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3254 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3255 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3256 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3258 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3261 current_link_up = 0;
3262 current_speed = SPEED_INVALID;
3263 current_duplex = DUPLEX_INVALID;
3265 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
3266 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3267 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3268 if (!(val & (1 << 10))) {
3269 val |= (1 << 10);
3270 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3271 goto relink;
3272 }
3273 }
3275 bmsr = 0;
3276 for (i = 0; i < 100; i++) {
3277 tg3_readphy(tp, MII_BMSR, &bmsr);
3278 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3279 (bmsr & BMSR_LSTATUS))
3280 break;
3281 udelay(40);
3282 }
3284 if (bmsr & BMSR_LSTATUS) {
3287 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3288 for (i = 0; i < 2000; i++) {
3289 udelay(10);
3290 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3291 aux_stat)
3292 break;
3293 }
3295 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3296 &current_speed,
3297 &current_duplex);
3299 bmcr = 0;
3300 for (i = 0; i < 200; i++) {
3301 tg3_readphy(tp, MII_BMCR, &bmcr);
3302 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3303 continue;
3304 if (bmcr && bmcr != 0x7fff)
3305 break;
3306 udelay(10);
3307 }
3309 lcl_adv = 0;
3310 rmt_adv = 0;
3312 tp->link_config.active_speed = current_speed;
3313 tp->link_config.active_duplex = current_duplex;
3315 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3316 if ((bmcr & BMCR_ANENABLE) &&
3317 tg3_copper_is_advertising_all(tp,
3318 tp->link_config.advertising)) {
3319 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3320 &rmt_adv))
3321 current_link_up = 1;
3322 }
3323 } else {
3324 if (!(bmcr & BMCR_ANENABLE) &&
3325 tp->link_config.speed == current_speed &&
3326 tp->link_config.duplex == current_duplex &&
3327 tp->link_config.flowctrl ==
3328 tp->link_config.active_flowctrl) {
3329 current_link_up = 1;
3333 if (current_link_up == 1 &&
3334 tp->link_config.active_duplex == DUPLEX_FULL)
3335 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3336 }
3338 relink:
3339 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3340 tg3_phy_copper_begin(tp);
3342 tg3_readphy(tp, MII_BMSR, &bmsr);
3343 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3344 (bmsr & BMSR_LSTATUS))
3345 current_link_up = 1;
3348 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3349 if (current_link_up == 1) {
3350 if (tp->link_config.active_speed == SPEED_100 ||
3351 tp->link_config.active_speed == SPEED_10)
3352 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3354 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3355 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
3356 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3358 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3360 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3361 if (tp->link_config.active_duplex == DUPLEX_HALF)
3362 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3364 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3365 if (current_link_up == 1 &&
3366 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3367 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3369 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3372 /* ??? Without this setting Netgear GA302T PHY does not
3373 * ??? send/receive packets...
3375 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3376 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3377 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3378 tw32_f(MAC_MI_MODE, tp->mi_mode);
3382 tw32_f(MAC_MODE, tp->mac_mode);
3385 tg3_phy_eee_adjust(tp, current_link_up);
3387 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3388 /* Polled via timer. */
3389 tw32_f(MAC_EVENT, 0);
3390 } else {
3391 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3392 }
3393 udelay(40);
3395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3396 current_link_up == 1 &&
3397 tp->link_config.active_speed == SPEED_1000 &&
3398 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3399 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3400 udelay(120);
3401 tw32_f(MAC_STATUS,
3402 (MAC_STATUS_SYNC_CHANGED |
3403 MAC_STATUS_CFG_CHANGED));
3404 udelay(40);
3405 tg3_write_mem(tp,
3406 NIC_SRAM_FIRMWARE_MBOX,
3407 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3408 }
3410 /* Prevent send BD corruption. */
3411 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3412 u16 oldlnkctl, newlnkctl;
3414 pci_read_config_word(tp->pdev,
3415 tp->pcie_cap + PCI_EXP_LNKCTL,
3416 &oldlnkctl);
3417 if (tp->link_config.active_speed == SPEED_100 ||
3418 tp->link_config.active_speed == SPEED_10)
3419 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3421 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3422 if (newlnkctl != oldlnkctl)
3423 pci_write_config_word(tp->pdev,
3424 tp->pcie_cap + PCI_EXP_LNKCTL,
3425 newlnkctl);
3426 }
3428 if (current_link_up != netif_carrier_ok(tp->dev)) {
3429 if (current_link_up)
3430 netif_carrier_on(tp->dev);
3432 netif_carrier_off(tp->dev);
3433 tg3_link_report(tp);
3434 }
3436 return 0;
3437 }
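/* Software 1000BASE-X autonegotiation (IEEE 802.3 clause 37) for
 * fiber parts whose MAC cannot autoneg in hardware.  The state
 * machine below shadows the tx/rx config words in MAC_TX_AUTO_NEG /
 * MAC_RX_AUTO_NEG and reports the link partner's abilities through
 * the MR_* flags.
 */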
3439 struct tg3_fiber_aneginfo {
3440 int state;
3441 #define ANEG_STATE_UNKNOWN 0
3442 #define ANEG_STATE_AN_ENABLE 1
3443 #define ANEG_STATE_RESTART_INIT 2
3444 #define ANEG_STATE_RESTART 3
3445 #define ANEG_STATE_DISABLE_LINK_OK 4
3446 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3447 #define ANEG_STATE_ABILITY_DETECT 6
3448 #define ANEG_STATE_ACK_DETECT_INIT 7
3449 #define ANEG_STATE_ACK_DETECT 8
3450 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3451 #define ANEG_STATE_COMPLETE_ACK 10
3452 #define ANEG_STATE_IDLE_DETECT_INIT 11
3453 #define ANEG_STATE_IDLE_DETECT 12
3454 #define ANEG_STATE_LINK_OK 13
3455 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3456 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3458 u32 flags;
3459 #define MR_AN_ENABLE 0x00000001
3460 #define MR_RESTART_AN 0x00000002
3461 #define MR_AN_COMPLETE 0x00000004
3462 #define MR_PAGE_RX 0x00000008
3463 #define MR_NP_LOADED 0x00000010
3464 #define MR_TOGGLE_TX 0x00000020
3465 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3466 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3467 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3468 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3469 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3470 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3471 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3472 #define MR_TOGGLE_RX 0x00002000
3473 #define MR_NP_RX 0x00004000
3475 #define MR_LINK_OK 0x80000000
3477 unsigned long link_time, cur_time;
3479 u32 ability_match_cfg;
3480 int ability_match_count;
3482 char ability_match, idle_match, ack_match;
3484 u32 txconfig, rxconfig;
3485 #define ANEG_CFG_NP 0x00000080
3486 #define ANEG_CFG_ACK 0x00000040
3487 #define ANEG_CFG_RF2 0x00000020
3488 #define ANEG_CFG_RF1 0x00000010
3489 #define ANEG_CFG_PS2 0x00000001
3490 #define ANEG_CFG_PS1 0x00008000
3491 #define ANEG_CFG_HD 0x00004000
3492 #define ANEG_CFG_FD 0x00002000
3493 #define ANEG_CFG_INVAL 0x00001f06
3495 };
3496 #define ANEG_OK 0
3497 #define ANEG_DONE 1
3498 #define ANEG_TIMER_ENAB 2
3499 #define ANEG_FAILED -1
3501 #define ANEG_STATE_SETTLE_TIME 10000
3503 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3504 struct tg3_fiber_aneginfo *ap)
3507 unsigned long delta;
3508 u32 rx_cfg_reg;
3509 int ret;
3511 if (ap->state == ANEG_STATE_UNKNOWN) {
3512 ap->rxconfig = 0;
3513 ap->link_time = 0;
3514 ap->cur_time = 0;
3515 ap->ability_match_cfg = 0;
3516 ap->ability_match_count = 0;
3517 ap->ability_match = 0;
3518 ap->idle_match = 0;
3519 ap->ack_match = 0;
3520 }
3522 ap->cur_time++;
3523 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3524 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3526 if (rx_cfg_reg != ap->ability_match_cfg) {
3527 ap->ability_match_cfg = rx_cfg_reg;
3528 ap->ability_match = 0;
3529 ap->ability_match_count = 0;
3530 } else {
3531 if (++ap->ability_match_count > 1) {
3532 ap->ability_match = 1;
3533 ap->ability_match_cfg = rx_cfg_reg;
3536 if (rx_cfg_reg & ANEG_CFG_ACK)
3537 ap->ack_match = 1;
3538 else
3539 ap->ack_match = 0;
3541 ap->idle_match = 0;
3542 } else {
3543 ap->idle_match = 1;
3544 ap->ability_match_cfg = 0;
3545 ap->ability_match_count = 0;
3546 ap->ability_match = 0;
3547 ap->ack_match = 0;
3549 rx_cfg_reg = 0;
3550 }
3552 ap->rxconfig = rx_cfg_reg;
3553 ret = ANEG_OK;
3555 switch (ap->state) {
3556 case ANEG_STATE_UNKNOWN:
3557 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3558 ap->state = ANEG_STATE_AN_ENABLE;
3560 /* fallthru */
3561 case ANEG_STATE_AN_ENABLE:
3562 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3563 if (ap->flags & MR_AN_ENABLE) {
3564 ap->link_time = 0;
3565 ap->cur_time = 0;
3566 ap->ability_match_cfg = 0;
3567 ap->ability_match_count = 0;
3568 ap->ability_match = 0;
3569 ap->idle_match = 0;
3570 ap->ack_match = 0;
3572 ap->state = ANEG_STATE_RESTART_INIT;
3573 } else {
3574 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3575 }
3576 break;
3578 case ANEG_STATE_RESTART_INIT:
3579 ap->link_time = ap->cur_time;
3580 ap->flags &= ~(MR_NP_LOADED);
3582 tw32(MAC_TX_AUTO_NEG, 0);
3583 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3584 tw32_f(MAC_MODE, tp->mac_mode);
3587 ret = ANEG_TIMER_ENAB;
3588 ap->state = ANEG_STATE_RESTART;
3589 break;
3591 case ANEG_STATE_RESTART:
3592 delta = ap->cur_time - ap->link_time;
3593 if (delta > ANEG_STATE_SETTLE_TIME)
3594 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3595 else
3596 ret = ANEG_TIMER_ENAB;
3597 break;
3599 case ANEG_STATE_DISABLE_LINK_OK:
3600 ret = ANEG_DONE;
3601 break;
3603 case ANEG_STATE_ABILITY_DETECT_INIT:
3604 ap->flags &= ~(MR_TOGGLE_TX);
3605 ap->txconfig = ANEG_CFG_FD;
3606 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3607 if (flowctrl & ADVERTISE_1000XPAUSE)
3608 ap->txconfig |= ANEG_CFG_PS1;
3609 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3610 ap->txconfig |= ANEG_CFG_PS2;
3611 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3612 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3613 tw32_f(MAC_MODE, tp->mac_mode);
3616 ap->state = ANEG_STATE_ABILITY_DETECT;
3617 break;
3619 case ANEG_STATE_ABILITY_DETECT:
3620 if (ap->ability_match != 0 && ap->rxconfig != 0)
3621 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3622 break;
3624 case ANEG_STATE_ACK_DETECT_INIT:
3625 ap->txconfig |= ANEG_CFG_ACK;
3626 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3627 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3628 tw32_f(MAC_MODE, tp->mac_mode);
3631 ap->state = ANEG_STATE_ACK_DETECT;
3632 break;
3634 case ANEG_STATE_ACK_DETECT:
3635 if (ap->ack_match != 0) {
3636 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3637 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3638 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3639 } else {
3640 ap->state = ANEG_STATE_AN_ENABLE;
3641 }
3642 } else if (ap->ability_match != 0 &&
3643 ap->rxconfig == 0) {
3644 ap->state = ANEG_STATE_AN_ENABLE;
3645 }
3646 break;
3648 case ANEG_STATE_COMPLETE_ACK_INIT:
3649 if (ap->rxconfig & ANEG_CFG_INVAL) {
3650 ret = ANEG_FAILED;
3651 break;
3652 }
3653 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3654 MR_LP_ADV_HALF_DUPLEX |
3655 MR_LP_ADV_SYM_PAUSE |
3656 MR_LP_ADV_ASYM_PAUSE |
3657 MR_LP_ADV_REMOTE_FAULT1 |
3658 MR_LP_ADV_REMOTE_FAULT2 |
3659 MR_LP_ADV_NEXT_PAGE |
3660 MR_TOGGLE_RX |
3661 MR_NP_RX);
3662 if (ap->rxconfig & ANEG_CFG_FD)
3663 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3664 if (ap->rxconfig & ANEG_CFG_HD)
3665 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3666 if (ap->rxconfig & ANEG_CFG_PS1)
3667 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3668 if (ap->rxconfig & ANEG_CFG_PS2)
3669 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3670 if (ap->rxconfig & ANEG_CFG_RF1)
3671 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3672 if (ap->rxconfig & ANEG_CFG_RF2)
3673 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3674 if (ap->rxconfig & ANEG_CFG_NP)
3675 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3677 ap->link_time = ap->cur_time;
3679 ap->flags ^= (MR_TOGGLE_TX);
3680 if (ap->rxconfig & 0x0008)
3681 ap->flags |= MR_TOGGLE_RX;
3682 if (ap->rxconfig & ANEG_CFG_NP)
3683 ap->flags |= MR_NP_RX;
3684 ap->flags |= MR_PAGE_RX;
3686 ap->state = ANEG_STATE_COMPLETE_ACK;
3687 ret = ANEG_TIMER_ENAB;
3688 break;
3690 case ANEG_STATE_COMPLETE_ACK:
3691 if (ap->ability_match != 0 &&
3692 ap->rxconfig == 0) {
3693 ap->state = ANEG_STATE_AN_ENABLE;
3694 break;
3695 }
3696 delta = ap->cur_time - ap->link_time;
3697 if (delta > ANEG_STATE_SETTLE_TIME) {
3698 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3699 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3700 } else {
3701 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3702 !(ap->flags & MR_NP_RX)) {
3703 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3704 } else {
3705 ret = ANEG_FAILED;
3706 }
3707 }
3708 }
3709 break;
3711 case ANEG_STATE_IDLE_DETECT_INIT:
3712 ap->link_time = ap->cur_time;
3713 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3714 tw32_f(MAC_MODE, tp->mac_mode);
3717 ap->state = ANEG_STATE_IDLE_DETECT;
3718 ret = ANEG_TIMER_ENAB;
3719 break;
3721 case ANEG_STATE_IDLE_DETECT:
3722 if (ap->ability_match != 0 &&
3723 ap->rxconfig == 0) {
3724 ap->state = ANEG_STATE_AN_ENABLE;
3725 break;
3726 }
3727 delta = ap->cur_time - ap->link_time;
3728 if (delta > ANEG_STATE_SETTLE_TIME) {
3729 /* XXX another gem from the Broadcom driver :( */
3730 ap->state = ANEG_STATE_LINK_OK;
3731 }
3732 break;
3734 case ANEG_STATE_LINK_OK:
3735 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3736 ret = ANEG_DONE;
3737 break;
3739 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3740 /* ??? unimplemented */
3741 break;
3743 case ANEG_STATE_NEXT_PAGE_WAIT:
3744 /* ??? unimplemented */
3745 break;
3747 default:
3748 ret = ANEG_FAILED;
3749 break;
3750 }
3752 return ret;
3753 }
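/* Drive the clause 37 state machine to completion: config words are
 * sent for up to 195000 ticks of roughly 1 usec each, and the result
 * is reported through the caller's txflags/rxflags.  Returns nonzero
 * only when the state machine finished with completion/link flags
 * set.
 */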
3755 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3757 int res = 0;
3758 struct tg3_fiber_aneginfo aninfo;
3759 int status = ANEG_FAILED;
3760 unsigned int tick;
3761 u32 tmp;
3763 tw32_f(MAC_TX_AUTO_NEG, 0);
3765 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3766 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3769 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3772 memset(&aninfo, 0, sizeof(aninfo));
3773 aninfo.flags |= MR_AN_ENABLE;
3774 aninfo.state = ANEG_STATE_UNKNOWN;
3775 aninfo.cur_time = 0;
3776 tick = 0;
3777 while (++tick < 195000) {
3778 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3779 if (status == ANEG_DONE || status == ANEG_FAILED)
3780 break;
3782 udelay(1);
3783 }
3785 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3786 tw32_f(MAC_MODE, tp->mac_mode);
3789 *txflags = aninfo.txconfig;
3790 *rxflags = aninfo.flags;
3792 if (status == ANEG_DONE &&
3793 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3794 MR_LP_ADV_FULL_DUPLEX)))
3795 res = 1;
3797 return res;
3798 }
3800 static void tg3_init_bcm8002(struct tg3 *tp)
3802 u32 mac_status = tr32(MAC_STATUS);
3805 /* Reset when initting first time or we have a link. */
3806 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3807 !(mac_status & MAC_STATUS_PCS_SYNCED))
3808 return;
3810 /* Set PLL lock range. */
3811 tg3_writephy(tp, 0x16, 0x8007);
3814 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3816 /* Wait for reset to complete. */
3817 /* XXX schedule_timeout() ... */
3818 for (i = 0; i < 500; i++)
3819 udelay(10);
3821 /* Config mode; select PMA/Ch 1 regs. */
3822 tg3_writephy(tp, 0x10, 0x8411);
3824 /* Enable auto-lock and comdet, select txclk for tx. */
3825 tg3_writephy(tp, 0x11, 0x0a10);
3827 tg3_writephy(tp, 0x18, 0x00a0);
3828 tg3_writephy(tp, 0x16, 0x41ff);
3830 /* Assert and deassert POR. */
3831 tg3_writephy(tp, 0x13, 0x0400);
3832 udelay(40);
3833 tg3_writephy(tp, 0x13, 0x0000);
3835 tg3_writephy(tp, 0x11, 0x0a50);
3836 udelay(40);
3837 tg3_writephy(tp, 0x11, 0x0a10);
3839 /* Wait for signal to stabilize */
3840 /* XXX schedule_timeout() ... */
3841 for (i = 0; i < 15000; i++)
3842 udelay(10);
3844 /* Deselect the channel register so we can read the PHYID
3845 * later.
3846 */
3847 tg3_writephy(tp, 0x10, 0x8011);
3848 }
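/* Hardware-assisted fiber autoneg via the SG_DIG block (5704S and
 * friends).  The expected SG_DIG_CTRL value is built from the
 * requested flow control; a mismatch restarts autoneg, and a timeout
 * falls back to parallel detection when the PCS is synced but no
 * config code words are being received.
 */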
3850 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3853 u32 sg_dig_ctrl, sg_dig_status;
3854 u32 serdes_cfg, expected_sg_dig_ctrl;
3855 int workaround, port_a;
3856 int current_link_up;
3858 serdes_cfg = 0;
3859 expected_sg_dig_ctrl = 0;
3860 workaround = 0;
3861 port_a = 1;
3862 current_link_up = 0;
3864 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3865 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3866 workaround = 1;
3867 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3868 port_a = 0;
3870 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3871 /* preserve bits 20-23 for voltage regulator */
3872 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3875 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3877 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3878 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3880 u32 val = serdes_cfg;
3882 if (port_a)
3883 val |= 0xc010000;
3884 else
3885 val |= 0x4010000;
3886 tw32_f(MAC_SERDES_CFG, val);
3887 }
3889 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3890 udelay(40);
3891 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3892 tg3_setup_flow_control(tp, 0, 0);
3893 current_link_up = 1;
3894 }
3895 goto out;
3896 }
3898 /* Want auto-negotiation. */
3899 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3901 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3902 if (flowctrl & ADVERTISE_1000XPAUSE)
3903 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3904 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3905 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3907 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3908 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3909 tp->serdes_counter &&
3910 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3911 MAC_STATUS_RCVD_CFG)) ==
3912 MAC_STATUS_PCS_SYNCED)) {
3913 tp->serdes_counter--;
3914 current_link_up = 1;
3915 goto out;
3916 }
3917 restart_autoneg:
3918 if (workaround)
3919 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3920 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3921 udelay(5);
3922 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3924 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3925 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3926 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3927 MAC_STATUS_SIGNAL_DET)) {
3928 sg_dig_status = tr32(SG_DIG_STATUS);
3929 mac_status = tr32(MAC_STATUS);
3931 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3932 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3933 u32 local_adv = 0, remote_adv = 0;
3935 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3936 local_adv |= ADVERTISE_1000XPAUSE;
3937 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3938 local_adv |= ADVERTISE_1000XPSE_ASYM;
3940 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3941 remote_adv |= LPA_1000XPAUSE;
3942 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3943 remote_adv |= LPA_1000XPAUSE_ASYM;
3945 tg3_setup_flow_control(tp, local_adv, remote_adv);
3946 current_link_up = 1;
3947 tp->serdes_counter = 0;
3948 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3949 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3950 if (tp->serdes_counter)
3951 tp->serdes_counter--;
3952 else {
3953 if (workaround) {
3954 u32 val = serdes_cfg;
3956 if (port_a)
3957 val |= 0xc010000;
3958 else
3959 val |= 0x4010000;
3961 tw32_f(MAC_SERDES_CFG, val);
3962 }
3964 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3965 udelay(40);
3967 /* Link parallel detection - link is up */
3968 /* only if we have PCS_SYNC and not */
3969 /* receiving config code words */
3970 mac_status = tr32(MAC_STATUS);
3971 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3972 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3973 tg3_setup_flow_control(tp, 0, 0);
3974 current_link_up = 1;
3975 tp->phy_flags |=
3976 TG3_PHYFLG_PARALLEL_DETECT;
3977 tp->serdes_counter =
3978 SERDES_PARALLEL_DET_TIMEOUT;
3979 } else
3980 goto restart_autoneg;
3981 }
3982 }
3983 } else {
3984 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3985 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3986 }
3988 out:
3989 return current_link_up;
3990 }
3992 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3994 int current_link_up = 0;
3996 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3997 goto out;
3999 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4000 u32 txflags, rxflags;
4001 int i;
4003 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4004 u32 local_adv = 0, remote_adv = 0;
4006 if (txflags & ANEG_CFG_PS1)
4007 local_adv |= ADVERTISE_1000XPAUSE;
4008 if (txflags & ANEG_CFG_PS2)
4009 local_adv |= ADVERTISE_1000XPSE_ASYM;
4011 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4012 remote_adv |= LPA_1000XPAUSE;
4013 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4014 remote_adv |= LPA_1000XPAUSE_ASYM;
4016 tg3_setup_flow_control(tp, local_adv, remote_adv);
4018 current_link_up = 1;
4019 }
4020 for (i = 0; i < 30; i++) {
4021 udelay(20);
4022 tw32_f(MAC_STATUS,
4023 (MAC_STATUS_SYNC_CHANGED |
4024 MAC_STATUS_CFG_CHANGED));
4026 if ((tr32(MAC_STATUS) &
4027 (MAC_STATUS_SYNC_CHANGED |
4028 MAC_STATUS_CFG_CHANGED)) == 0)
4029 break;
4030 }
4032 mac_status = tr32(MAC_STATUS);
4033 if (current_link_up == 0 &&
4034 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4035 !(mac_status & MAC_STATUS_RCVD_CFG))
4036 current_link_up = 1;
4037 } else {
4038 tg3_setup_flow_control(tp, 0, 0);
4040 /* Forcing 1000FD link up. */
4041 current_link_up = 1;
4043 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4046 tw32_f(MAC_MODE, tp->mac_mode);
4047 udelay(40);
4048 }
4050 out:
4051 return current_link_up;
4052 }
4054 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4057 u16 orig_active_speed;
4058 u8 orig_active_duplex;
4059 u32 mac_status;
4060 int current_link_up;
4061 int i;
4063 orig_pause_cfg = tp->link_config.active_flowctrl;
4064 orig_active_speed = tp->link_config.active_speed;
4065 orig_active_duplex = tp->link_config.active_duplex;
4067 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
4068 netif_carrier_ok(tp->dev) &&
4069 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
4070 mac_status = tr32(MAC_STATUS);
4071 mac_status &= (MAC_STATUS_PCS_SYNCED |
4072 MAC_STATUS_SIGNAL_DET |
4073 MAC_STATUS_CFG_CHANGED |
4074 MAC_STATUS_RCVD_CFG);
4075 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4076 MAC_STATUS_SIGNAL_DET)) {
4077 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4078 MAC_STATUS_CFG_CHANGED));
4079 return 0;
4080 }
4081 }
4083 tw32_f(MAC_TX_AUTO_NEG, 0);
4085 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4086 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
4087 tw32_f(MAC_MODE, tp->mac_mode);
4090 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4091 tg3_init_bcm8002(tp);
4093 /* Enable link change event even when serdes polling. */
4094 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4097 current_link_up = 0;
4098 mac_status = tr32(MAC_STATUS);
4100 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4101 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4103 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4105 tp->napi[0].hw_status->status =
4106 (SD_STATUS_UPDATED |
4107 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4109 for (i = 0; i < 100; i++) {
4110 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4111 MAC_STATUS_CFG_CHANGED));
4113 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4114 MAC_STATUS_CFG_CHANGED |
4115 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4116 break;
4117 }
4119 mac_status = tr32(MAC_STATUS);
4120 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4121 current_link_up = 0;
4122 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4123 tp->serdes_counter == 0) {
4124 tw32_f(MAC_MODE, (tp->mac_mode |
4125 MAC_MODE_SEND_CONFIGS));
4127 tw32_f(MAC_MODE, tp->mac_mode);
4131 if (current_link_up == 1) {
4132 tp->link_config.active_speed = SPEED_1000;
4133 tp->link_config.active_duplex = DUPLEX_FULL;
4134 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4135 LED_CTRL_LNKLED_OVERRIDE |
4136 LED_CTRL_1000MBPS_ON));
4138 tp->link_config.active_speed = SPEED_INVALID;
4139 tp->link_config.active_duplex = DUPLEX_INVALID;
4140 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4141 LED_CTRL_LNKLED_OVERRIDE |
4142 LED_CTRL_TRAFFIC_OVERRIDE));
4145 if (current_link_up != netif_carrier_ok(tp->dev)) {
4146 if (current_link_up)
4147 netif_carrier_on(tp->dev);
4149 netif_carrier_off(tp->dev);
4150 tg3_link_report(tp);
4151 } else {
4152 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4153 if (orig_pause_cfg != now_pause_cfg ||
4154 orig_active_speed != tp->link_config.active_speed ||
4155 orig_active_duplex != tp->link_config.active_duplex)
4156 tg3_link_report(tp);
4157 }
4159 return 0;
4160 }
4162 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4164 int current_link_up, err = 0;
4165 u32 bmsr, bmcr;
4166 u16 current_speed;
4167 u8 current_duplex;
4168 u32 local_adv, remote_adv;
4170 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4171 tw32_f(MAC_MODE, tp->mac_mode);
4172 udelay(40);
4174 tw32(MAC_EVENT, 0);
4176 tw32_f(MAC_STATUS,
4177 (MAC_STATUS_SYNC_CHANGED |
4178 MAC_STATUS_CFG_CHANGED |
4179 MAC_STATUS_MI_COMPLETION |
4180 MAC_STATUS_LNKSTATE_CHANGED));
4181 udelay(40);
4183 if (force_reset)
4184 tg3_phy_reset(tp);
4186 current_link_up = 0;
4187 current_speed = SPEED_INVALID;
4188 current_duplex = DUPLEX_INVALID;
4190 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4191 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4193 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4194 bmsr |= BMSR_LSTATUS;
4195 else
4196 bmsr &= ~BMSR_LSTATUS;
4197 }
4199 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4201 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4202 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4203 /* do nothing, just check for link up at the end */
4204 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4207 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4208 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4209 ADVERTISE_1000XPAUSE |
4210 ADVERTISE_1000XPSE_ASYM |
4213 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4215 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4216 new_adv |= ADVERTISE_1000XHALF;
4217 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4218 new_adv |= ADVERTISE_1000XFULL;
4220 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4221 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4222 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4223 tg3_writephy(tp, MII_BMCR, bmcr);
4225 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4226 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4227 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4229 return err;
4230 }
4231 } else {
4232 u32 new_bmcr;
4234 bmcr &= ~BMCR_SPEED1000;
4235 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4237 if (tp->link_config.duplex == DUPLEX_FULL)
4238 new_bmcr |= BMCR_FULLDPLX;
4240 if (new_bmcr != bmcr) {
4241 /* BMCR_SPEED1000 is a reserved bit that needs
4242 * to be set on write.
4244 new_bmcr |= BMCR_SPEED1000;
4246 /* Force a linkdown */
4247 if (netif_carrier_ok(tp->dev)) {
4250 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4251 adv &= ~(ADVERTISE_1000XFULL |
4252 ADVERTISE_1000XHALF |
4253 ADVERTISE_SLCT);
4254 tg3_writephy(tp, MII_ADVERTISE, adv);
4255 tg3_writephy(tp, MII_BMCR, bmcr |
4256 BMCR_ANRESTART |
4257 BMCR_ANENABLE);
4258 udelay(10);
4259 netif_carrier_off(tp->dev);
4260 }
4261 tg3_writephy(tp, MII_BMCR, new_bmcr);
4262 bmcr = new_bmcr;
4263 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4264 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4265 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4266 ASIC_REV_5714) {
4267 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4268 bmsr |= BMSR_LSTATUS;
4269 else
4270 bmsr &= ~BMSR_LSTATUS;
4271 }
4272 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4273 }
4276 if (bmsr & BMSR_LSTATUS) {
4277 current_speed = SPEED_1000;
4278 current_link_up = 1;
4279 if (bmcr & BMCR_FULLDPLX)
4280 current_duplex = DUPLEX_FULL;
4282 current_duplex = DUPLEX_HALF;
4284 local_adv = 0;
4285 remote_adv = 0;
4287 if (bmcr & BMCR_ANENABLE) {
4288 u32 common;
4290 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4291 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4292 common = local_adv & remote_adv;
4293 if (common & (ADVERTISE_1000XHALF |
4294 ADVERTISE_1000XFULL)) {
4295 if (common & ADVERTISE_1000XFULL)
4296 current_duplex = DUPLEX_FULL;
4298 current_duplex = DUPLEX_HALF;
4299 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4300 /* Link is up via parallel detect */
4301 } else {
4302 current_link_up = 0;
4303 }
4304 }
4305 }
4307 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4308 tg3_setup_flow_control(tp, local_adv, remote_adv);
4310 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4311 if (tp->link_config.active_duplex == DUPLEX_HALF)
4312 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4314 tw32_f(MAC_MODE, tp->mac_mode);
4317 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4319 tp->link_config.active_speed = current_speed;
4320 tp->link_config.active_duplex = current_duplex;
4322 if (current_link_up != netif_carrier_ok(tp->dev)) {
4323 if (current_link_up)
4324 netif_carrier_on(tp->dev);
4325 else {
4326 netif_carrier_off(tp->dev);
4327 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4328 }
4329 tg3_link_report(tp);
4330 }
4331 return err;
4332 }
4334 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4336 if (tp->serdes_counter) {
4337 /* Give autoneg time to complete. */
4338 tp->serdes_counter--;
4339 return;
4340 }
4342 if (!netif_carrier_ok(tp->dev) &&
4343 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4344 u32 bmcr;
4346 tg3_readphy(tp, MII_BMCR, &bmcr);
4347 if (bmcr & BMCR_ANENABLE) {
4348 u32 phy1, phy2;
4350 /* Select shadow register 0x1f */
4351 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
4352 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
4354 /* Select expansion interrupt status register */
4355 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4356 MII_TG3_DSP_EXP1_INT_STAT);
4357 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4358 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4360 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4361 /* We have signal detect and not receiving
4362 * config code words, link is up by parallel
4363 * detection.
4364 */
4366 bmcr &= ~BMCR_ANENABLE;
4367 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4368 tg3_writephy(tp, MII_BMCR, bmcr);
4369 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4370 }
4371 }
4372 } else if (netif_carrier_ok(tp->dev) &&
4373 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4374 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
4375 u32 phy2;
4377 /* Select expansion interrupt status register */
4378 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
4379 MII_TG3_DSP_EXP1_INT_STAT);
4380 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
4381 if (phy2 & 0x20) {
4382 u32 bmcr;
4384 /* Config code words received, turn on autoneg. */
4385 tg3_readphy(tp, MII_BMCR, &bmcr);
4386 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4388 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4389 }
4390 }
4391 }
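/* Top-level link setup dispatcher: picks the SerDes, MII-SerDes or
 * copper flavor, then applies post-link chores common to all three
 * (5784_AX clock prescaler, TX IPG/slot time for half-duplex gigabit,
 * statistics coalescing and the ASPM L1 threshold workaround).
 */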
4394 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4396 int err;
4398 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
4399 err = tg3_setup_fiber_phy(tp, force_reset);
4400 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4401 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4402 else
4403 err = tg3_setup_copper_phy(tp, force_reset);
4405 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4406 u32 val, scale;
4408 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4409 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4410 scale = 65;
4411 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4412 scale = 6;
4413 else
4414 scale = 12;
4416 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4417 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4418 tw32(GRC_MISC_CFG, val);
4421 if (tp->link_config.active_speed == SPEED_1000 &&
4422 tp->link_config.active_duplex == DUPLEX_HALF)
4423 tw32(MAC_TX_LENGTHS,
4424 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4425 (6 << TX_LENGTHS_IPG_SHIFT) |
4426 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4427 else
4428 tw32(MAC_TX_LENGTHS,
4429 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4430 (6 << TX_LENGTHS_IPG_SHIFT) |
4431 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4433 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4434 if (netif_carrier_ok(tp->dev)) {
4435 tw32(HOSTCC_STAT_COAL_TICKS,
4436 tp->coal.stats_block_coalesce_usecs);
4437 } else {
4438 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4439 }
4440 }
4442 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4443 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4444 if (!netif_carrier_ok(tp->dev))
4445 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4446 tp->pwrmgmt_thresh;
4447 else
4448 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4449 tw32(PCIE_PWR_MGMT_THRESH, val);
4450 }
4452 return err;
4453 }
4455 static inline int tg3_irq_sync(struct tg3 *tp)
4457 return tp->irq_sync;
4460 /* This is called whenever we suspect that the system chipset is re-
4461 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4462 * is bogus tx completions. We try to recover by setting the
4463 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4464 * in tg3_init_hw.
4465 */
4466 static void tg3_tx_recover(struct tg3 *tp)
4468 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4469 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4471 netdev_warn(tp->dev,
4472 "The system may be re-ordering memory-mapped I/O "
4473 "cycles to the network device, attempting to recover. "
4474 "Please report the problem to the driver maintainer "
4475 "and include system chipset information.\n");
4477 spin_lock(&tp->lock);
4478 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4479 spin_unlock(&tp->lock);
4480 }
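/* Free descriptors in a tx ring.  Because TG3_TX_RING_SIZE is a power
 * of two, masking handles producer wraparound: e.g. with a 512-entry
 * ring, tx_pending = 511, prod = 5 and cons = 510, there are
 * (5 - 510) & 511 = 7 in-flight descriptors, leaving 511 - 7 = 504
 * available.
 */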
4482 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4484 /* Tell compiler to fetch tx indices from memory. */
4485 barrier();
4486 return tnapi->tx_pending -
4487 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4490 /* Tigon3 never reports partial packet sends. So we do not
4491 * need special logic to handle SKBs that have not had all
4492 * of their frags sent yet, like SunGEM does.
4494 static void tg3_tx(struct tg3_napi *tnapi)
4496 struct tg3 *tp = tnapi->tp;
4497 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4498 u32 sw_idx = tnapi->tx_cons;
4499 struct netdev_queue *txq;
4500 int index = tnapi - tp->napi;
4502 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4503 index--;
4505 txq = netdev_get_tx_queue(tp->dev, index);
4507 while (sw_idx != hw_idx) {
4508 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4509 struct sk_buff *skb = ri->skb;
4510 int i, tx_bug = 0;
4512 if (unlikely(skb == NULL)) {
4513 tg3_tx_recover(tp);
4514 return;
4515 }
4517 pci_unmap_single(tp->pdev,
4518 dma_unmap_addr(ri, mapping),
4519 skb_headlen(skb),
4520 PCI_DMA_TODEVICE);
4522 ri->skb = NULL;
4524 sw_idx = NEXT_TX(sw_idx);
4526 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4527 ri = &tnapi->tx_buffers[sw_idx];
4528 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4529 tx_bug = 1;
4531 pci_unmap_page(tp->pdev,
4532 dma_unmap_addr(ri, mapping),
4533 skb_shinfo(skb)->frags[i].size,
4535 sw_idx = NEXT_TX(sw_idx);
4536 }
4538 dev_kfree_skb(skb);
4540 if (unlikely(tx_bug)) {
4541 tg3_tx_recover(tp);
4542 break;
4543 }
4544 }
4546 tnapi->tx_cons = sw_idx;
4548 /* Need to make the tx_cons update visible to tg3_start_xmit()
4549 * before checking for netif_queue_stopped(). Without the
4550 * memory barrier, there is a small possibility that tg3_start_xmit()
4551 * will miss it and cause the queue to be stopped forever.
4555 if (unlikely(netif_tx_queue_stopped(txq) &&
4556 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4557 __netif_tx_lock(txq, smp_processor_id());
4558 if (netif_tx_queue_stopped(txq) &&
4559 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4560 netif_tx_wake_queue(txq);
4561 __netif_tx_unlock(txq);
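/*
 * Illustrative sketch (standalone, not driver code): the two-sided
 * stop/wake handshake described in the barrier comment above, reduced to
 * C11 atomics.  The producer stops the queue and then re-checks free
 * space; the consumer publishes completions and then re-checks the
 * stopped flag.  The seq_cst fences play the role of smp_mb(): at least
 * one side observes the other's update, so the queue cannot stay stopped
 * forever.  All names here are invented for the sketch.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool ex_queue_stopped;
static atomic_uint ex_free_slots;

static void ex_producer_out_of_room(void)
{
	atomic_store(&ex_queue_stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	if (atomic_load(&ex_free_slots) > 0)		/* re-check after stopping */
		atomic_store(&ex_queue_stopped, false);
}

static void ex_consumer_completed(unsigned int n)
{
	atomic_fetch_add(&ex_free_slots, n);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	if (atomic_load(&ex_queue_stopped))		/* re-check after publishing */
		atomic_store(&ex_queue_stopped, false);
}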
4565 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4570 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4571 map_sz, PCI_DMA_FROMDEVICE);
4572 dev_kfree_skb_any(ri->skb);
4576 /* Returns size of skb allocated or < 0 on error.
4578 * We only need to fill in the address because the other members
4579 * of the RX descriptor are invariant; see tg3_init_rings.
4581 * Note the purposeful asymmetry of CPU vs. chip accesses. For
4582 * posting buffers we only dirty the first cache line of the RX
4583 * descriptor (containing the address), whereas for the RX status
4584 * buffers the CPU only reads the last cache line of the RX descriptor
4585 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4587 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4588 u32 opaque_key, u32 dest_idx_unmasked)
4590 struct tg3_rx_buffer_desc *desc;
4591 struct ring_info *map;
4592 struct sk_buff *skb;
4594 int skb_size, dest_idx;
4596 switch (opaque_key) {
4597 case RXD_OPAQUE_RING_STD:
4598 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4599 desc = &tpr->rx_std[dest_idx];
4600 map = &tpr->rx_std_buffers[dest_idx];
4601 skb_size = tp->rx_pkt_map_sz;
4604 case RXD_OPAQUE_RING_JUMBO:
4605 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4606 desc = &tpr->rx_jmb[dest_idx].std;
4607 map = &tpr->rx_jmb_buffers[dest_idx];
4608 skb_size = TG3_RX_JMB_MAP_SZ;
4615 /* Do not overwrite any of the map or rp information
4616 * until we are sure we can commit to a new buffer.
4618 * Callers depend upon this behavior and assume that
4619 * we leave everything unchanged if we fail.
4621 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4625 skb_reserve(skb, tp->rx_offset);
4627 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4628 PCI_DMA_FROMDEVICE);
4629 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4635 dma_unmap_addr_set(map, mapping, mapping);
4637 desc->addr_hi = ((u64)mapping >> 32);
4638 desc->addr_lo = ((u64)mapping & 0xffffffff);
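/*
 * Illustrative check (standalone, not driver code): the hi/lo split just
 * above.  The hardware consumes the 64-bit bus address as two 32-bit
 * descriptor words; the shift/mask pair round-trips losslessly.  The
 * address value is an arbitrary example.
 */
#include <assert.h>
#include <stdint.h>

static void ex_dma_addr_split_check(void)
{
	uint64_t mapping = 0x000000012345abcdULL;
	uint32_t hi = (uint32_t)(mapping >> 32);
	uint32_t lo = (uint32_t)(mapping & 0xffffffffu);

	assert((((uint64_t)hi << 32) | lo) == mapping);
}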
4643 /* We only need to copy the address over because the other
4644 * members of the RX descriptor are invariant. See notes above
4645 * tg3_alloc_rx_skb for full details.
4647 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4648 struct tg3_rx_prodring_set *dpr,
4649 u32 opaque_key, int src_idx,
4650 u32 dest_idx_unmasked)
4652 struct tg3 *tp = tnapi->tp;
4653 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4654 struct ring_info *src_map, *dest_map;
4655 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
4658 switch (opaque_key) {
4659 case RXD_OPAQUE_RING_STD:
4660 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
4661 dest_desc = &dpr->rx_std[dest_idx];
4662 dest_map = &dpr->rx_std_buffers[dest_idx];
4663 src_desc = &spr->rx_std[src_idx];
4664 src_map = &spr->rx_std_buffers[src_idx];
4667 case RXD_OPAQUE_RING_JUMBO:
4668 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
4669 dest_desc = &dpr->rx_jmb[dest_idx].std;
4670 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4671 src_desc = &spr->rx_jmb[src_idx].std;
4672 src_map = &spr->rx_jmb_buffers[src_idx];
4679 dest_map->skb = src_map->skb;
4680 dma_unmap_addr_set(dest_map, mapping,
4681 dma_unmap_addr(src_map, mapping));
4682 dest_desc->addr_hi = src_desc->addr_hi;
4683 dest_desc->addr_lo = src_desc->addr_lo;
4685 /* Ensure that the update to the skb happens after the physical
4686 * addresses have been transferred to the new BD location.
4690 src_map->skb = NULL;
4693 /* The RX ring scheme is composed of multiple rings which post fresh
4694 * buffers to the chip, and one special ring the chip uses to report
4695 * status back to the host.
4697 * The special ring reports the status of received packets to the
4698 * host. The chip does not write into the original descriptor the
4699 * RX buffer was obtained from. The chip simply takes the original
4700 * descriptor as provided by the host, updates the status and length
4701 * field, then writes this into the next status ring entry.
4703 * Each ring the host uses to post buffers to the chip is described
4704 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4705 * it is first placed into the on-chip RAM. When the packet's length
4706 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4707 * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
4708 * whose MAXLEN covers the new packet's length is chosen.
4710 * The "separate ring for rx status" scheme may sound odd, but it makes
4711 * sense from a cache coherency perspective. If only the host writes
4712 * to the buffer post rings, and only the chip writes to the rx status
4713 * rings, then cache lines never move beyond shared-modified state.
4714 * If both the host and chip were to write into the same ring, cache line
4715 * eviction could occur since both entities want it in an exclusive state.
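/*
 * Illustrative sketch (standalone, not driver code): the TG3_BDINFO
 * MAXLEN walk described above, reduced to plain C.  The first ring whose
 * MAXLEN can hold the received frame wins.  The table and sizes are
 * invented for the example (one standard and one jumbo ring).
 */
static int ex_pick_rx_ring(const unsigned int *maxlen, int nrings,
			   unsigned int pkt_len)
{
	int i;

	for (i = 0; i < nrings; i++)
		if (pkt_len <= maxlen[i])
			return i;	/* first BDINFO that fits is chosen */
	return -1;			/* no ring can hold this frame */
}

/* e.g. with maxlen[] = { 1536, 9022 }, a 300-byte frame selects ring 0
 * and a 4000-byte frame selects ring 1. */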
4717 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4719 struct tg3 *tp = tnapi->tp;
4720 u32 work_mask, rx_std_posted = 0;
4721 u32 std_prod_idx, jmb_prod_idx;
4722 u32 sw_idx = tnapi->rx_rcb_ptr;
4725 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
4727 hw_idx = *(tnapi->rx_rcb_prod_idx);
4729 * We need to order the read of hw_idx and the read of
4730 * the opaque cookie.
4735 std_prod_idx = tpr->rx_std_prod_idx;
4736 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4737 while (sw_idx != hw_idx && budget > 0) {
4738 struct ring_info *ri;
4739 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4741 struct sk_buff *skb;
4742 dma_addr_t dma_addr;
4743 u32 opaque_key, desc_idx, *post_ptr;
4745 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4746 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4747 if (opaque_key == RXD_OPAQUE_RING_STD) {
4748 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
4749 dma_addr = dma_unmap_addr(ri, mapping);
4751 post_ptr = &std_prod_idx;
4753 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4754 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
4755 dma_addr = dma_unmap_addr(ri, mapping);
4757 post_ptr = &jmb_prod_idx;
4759 goto next_pkt_nopost;
4761 work_mask |= opaque_key;
4763 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4764 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4766 tg3_recycle_rx(tnapi, tpr, opaque_key,
4767 desc_idx, *post_ptr);
4769 /* Other statistics kept track of by card. */
4774 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4777 if (len > TG3_RX_COPY_THRESH(tp)) {
4780 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4785 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4786 PCI_DMA_FROMDEVICE);
4788 /* Ensure that the update to the skb happens
4789 * after the usage of the old DMA mapping.
4797 struct sk_buff *copy_skb;
4799 tg3_recycle_rx(tnapi, tpr, opaque_key,
4800 desc_idx, *post_ptr);
4802 copy_skb = netdev_alloc_skb(tp->dev, len +
4804 if (copy_skb == NULL)
4805 goto drop_it_no_recycle;
4807 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
4808 skb_put(copy_skb, len);
4809 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4810 skb_copy_from_linear_data(skb, copy_skb->data, len);
4811 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4813 /* We'll reuse the original ring buffer. */
4817 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4818 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4819 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4820 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4821 skb->ip_summed = CHECKSUM_UNNECESSARY;
4823 skb_checksum_none_assert(skb);
4825 skb->protocol = eth_type_trans(skb, tp->dev);
4827 if (len > (tp->dev->mtu + ETH_HLEN) &&
4828 skb->protocol != htons(ETH_P_8021Q)) {
4830 goto drop_it_no_recycle;
4833 if (desc->type_flags & RXD_FLAG_VLAN &&
4834 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
4835 __vlan_hwaccel_put_tag(skb,
4836 desc->err_vlan & RXD_VLAN_MASK);
4838 napi_gro_receive(&tnapi->napi, skb);
4846 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4847 tpr->rx_std_prod_idx = std_prod_idx &
4848 tp->rx_std_ring_mask;
4849 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4850 tpr->rx_std_prod_idx);
4851 work_mask &= ~RXD_OPAQUE_RING_STD;
4856 sw_idx &= tp->rx_ret_ring_mask;
4858 /* Refresh hw_idx to see if there is new work */
4859 if (sw_idx == hw_idx) {
4860 hw_idx = *(tnapi->rx_rcb_prod_idx);
4865 /* ACK the status ring. */
4866 tnapi->rx_rcb_ptr = sw_idx;
4867 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4869 /* Refill RX ring(s). */
4870 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4871 if (work_mask & RXD_OPAQUE_RING_STD) {
4872 tpr->rx_std_prod_idx = std_prod_idx &
4873 tp->rx_std_ring_mask;
4874 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4875 tpr->rx_std_prod_idx);
4877 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4878 tpr->rx_jmb_prod_idx = jmb_prod_idx &
4879 tp->rx_jmb_ring_mask;
4880 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4881 tpr->rx_jmb_prod_idx);
4884 } else if (work_mask) {
4885 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4886 * updated before the producer indices can be updated.
4890 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
4891 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
4893 if (tnapi != &tp->napi[1])
4894 napi_schedule(&tp->napi[1].napi);
4900 static void tg3_poll_link(struct tg3 *tp)
4902 /* handle link change and other phy events */
4903 if (!(tp->tg3_flags &
4904 (TG3_FLAG_USE_LINKCHG_REG |
4905 TG3_FLAG_POLL_SERDES))) {
4906 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4908 if (sblk->status & SD_STATUS_LINK_CHG) {
4909 sblk->status = SD_STATUS_UPDATED |
4910 (sblk->status & ~SD_STATUS_LINK_CHG);
4911 spin_lock(&tp->lock);
4912 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4914 (MAC_STATUS_SYNC_CHANGED |
4915 MAC_STATUS_CFG_CHANGED |
4916 MAC_STATUS_MI_COMPLETION |
4917 MAC_STATUS_LNKSTATE_CHANGED));
4920 tg3_setup_phy(tp, 0);
4921 spin_unlock(&tp->lock);
4926 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4927 struct tg3_rx_prodring_set *dpr,
4928 struct tg3_rx_prodring_set *spr)
4930 u32 si, di, cpycnt, src_prod_idx;
4934 src_prod_idx = spr->rx_std_prod_idx;
4936 /* Make sure updates to the rx_std_buffers[] entries and the
4937 * standard producer index are seen in the correct order.
4941 if (spr->rx_std_cons_idx == src_prod_idx)
4944 if (spr->rx_std_cons_idx < src_prod_idx)
4945 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4947 cpycnt = tp->rx_std_ring_mask + 1 -
4948 spr->rx_std_cons_idx;
4950 cpycnt = min(cpycnt,
4951 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
4953 si = spr->rx_std_cons_idx;
4954 di = dpr->rx_std_prod_idx;
4956 for (i = di; i < di + cpycnt; i++) {
4957 if (dpr->rx_std_buffers[i].skb) {
4967 /* Ensure that updates to the rx_std_buffers ring and the
4968 * shadowed hardware producer ring from tg3_recycle_skb() are
4969 * ordered correctly WRT the skb check above.
4973 memcpy(&dpr->rx_std_buffers[di],
4974 &spr->rx_std_buffers[si],
4975 cpycnt * sizeof(struct ring_info));
4977 for (i = 0; i < cpycnt; i++, di++, si++) {
4978 struct tg3_rx_buffer_desc *sbd, *dbd;
4979 sbd = &spr->rx_std[si];
4980 dbd = &dpr->rx_std[di];
4981 dbd->addr_hi = sbd->addr_hi;
4982 dbd->addr_lo = sbd->addr_lo;
4985 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
4986 tp->rx_std_ring_mask;
4987 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
4988 tp->rx_std_ring_mask;
4992 src_prod_idx = spr->rx_jmb_prod_idx;
4994 /* Make sure updates to the rx_jmb_buffers[] entries and
4995 * the jumbo producer index are seen in the correct order.
4999 if (spr->rx_jmb_cons_idx == src_prod_idx)
5002 if (spr->rx_jmb_cons_idx < src_prod_idx)
5003 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
5005 cpycnt = tp->rx_jmb_ring_mask + 1 -
5006 spr->rx_jmb_cons_idx;
5008 cpycnt = min(cpycnt,
5009 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
5011 si = spr->rx_jmb_cons_idx;
5012 di = dpr->rx_jmb_prod_idx;
5014 for (i = di; i < di + cpycnt; i++) {
5015 if (dpr->rx_jmb_buffers[i].skb) {
5025 /* Ensure that updates to the rx_jmb_buffers ring and the
5026 * shadowed hardware producer ring from tg3_recycle_skb() are
5027 * ordered correctly WRT the skb check above.
5031 memcpy(&dpr->rx_jmb_buffers[di],
5032 &spr->rx_jmb_buffers[si],
5033 cpycnt * sizeof(struct ring_info));
5035 for (i = 0; i < cpycnt; i++, di++, si++) {
5036 struct tg3_rx_buffer_desc *sbd, *dbd;
5037 sbd = &spr->rx_jmb[si].std;
5038 dbd = &dpr->rx_jmb[di].std;
5039 dbd->addr_hi = sbd->addr_hi;
5040 dbd->addr_lo = sbd->addr_lo;
5043 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
5044 tp->rx_jmb_ring_mask;
5045 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
5046 tp->rx_jmb_ring_mask;
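/*
 * Illustrative sketch (standalone, not driver code): the cpycnt clamping
 * used twice above.  A contiguous memcpy can only run to the end of a
 * ring, so the chunk is limited first by the source's distance to its
 * wrap point and then by the room left before the destination wraps; the
 * outer loop picks up the remainder on the next pass.
 */
static unsigned int ex_xfer_chunk(unsigned int ring_mask,
				  unsigned int src_cons, unsigned int src_prod,
				  unsigned int dst_prod)
{
	unsigned int cpycnt;

	if (src_cons == src_prod)
		return 0;				/* nothing to move */
	if (src_cons < src_prod)
		cpycnt = src_prod - src_cons;		/* no source wrap */
	else
		cpycnt = ring_mask + 1 - src_cons;	/* run up to the wrap */

	/* Do not run past the destination's wrap point either. */
	if (cpycnt > ring_mask + 1 - dst_prod)
		cpycnt = ring_mask + 1 - dst_prod;
	return cpycnt;
}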
5052 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
5054 struct tg3 *tp = tnapi->tp;
5056 /* run TX completion thread */
5057 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
5059 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5063 /* run RX thread, within the bounds set by NAPI.
5064 * All RX "locking" is done by ensuring outside
5065 * code synchronizes with tg3->napi.poll()
5067 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
5068 work_done += tg3_rx(tnapi, budget - work_done);
5070 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
5071 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
5073 u32 std_prod_idx = dpr->rx_std_prod_idx;
5074 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
5076 for (i = 1; i < tp->irq_cnt; i++)
5077 err |= tg3_rx_prodring_xfer(tp, dpr,
5078 &tp->napi[i].prodring);
5082 if (std_prod_idx != dpr->rx_std_prod_idx)
5083 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5084 dpr->rx_std_prod_idx);
5086 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5087 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5088 dpr->rx_jmb_prod_idx);
5093 tw32_f(HOSTCC_MODE, tp->coal_now);
5099 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5101 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5102 struct tg3 *tp = tnapi->tp;
5104 struct tg3_hw_status *sblk = tnapi->hw_status;
5107 work_done = tg3_poll_work(tnapi, work_done, budget);
5109 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5112 if (unlikely(work_done >= budget))
5115 /* tp->last_tag is used in tg3_int_reenable() below
5116 * to tell the hw how much work has been processed,
5117 * so we must read it before checking for more work.
5119 tnapi->last_tag = sblk->status_tag;
5120 tnapi->last_irq_tag = tnapi->last_tag;
5123 /* check for RX/TX work to do */
5124 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5125 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5126 napi_complete(napi);
5127 /* Reenable interrupts. */
5128 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5137 /* work_done is guaranteed to be less than budget. */
5138 napi_complete(napi);
5139 schedule_work(&tp->reset_task);
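/*
 * Illustrative sketch (standalone, not driver code): the tag snapshot
 * ordering in tg3_poll_msix() above.  The status tag is read before the
 * final work check; if new work races in after the snapshot, the tag
 * written back on re-enable (last_tag << 24) is already stale, so the
 * chip re-raises the interrupt instead of losing the event.  The struct
 * and helper are invented for the sketch.
 */
#include <stdint.h>

struct ex_status {
	uint32_t tag;
	int more_work;
};

/* Returns nonzero and fills *ack when it is safe to re-enable. */
static int ex_poll_exit_ack(const struct ex_status *sblk, uint32_t *ack)
{
	uint32_t last_tag = sblk->tag;	/* snapshot before the work check */

	if (sblk->more_work)
		return 0;		/* new work raced in: keep polling */
	*ack = last_tag << 24;		/* ack exactly the snapshotted tag */
	return 1;
}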
5143 static int tg3_poll(struct napi_struct *napi, int budget)
5145 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5146 struct tg3 *tp = tnapi->tp;
5148 struct tg3_hw_status *sblk = tnapi->hw_status;
5153 work_done = tg3_poll_work(tnapi, work_done, budget);
5155 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5158 if (unlikely(work_done >= budget))
5161 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5162 /* tp->last_tag is used in tg3_int_reenable() below
5163 * to tell the hw how much work has been processed,
5164 * so we must read it before checking for more work.
5166 tnapi->last_tag = sblk->status_tag;
5167 tnapi->last_irq_tag = tnapi->last_tag;
5170 sblk->status &= ~SD_STATUS_UPDATED;
5172 if (likely(!tg3_has_work(tnapi))) {
5173 napi_complete(napi);
5174 tg3_int_reenable(tnapi);
5182 /* work_done is guaranteed to be less than budget. */
5183 napi_complete(napi);
5184 schedule_work(&tp->reset_task);
5188 static void tg3_napi_disable(struct tg3 *tp)
5192 for (i = tp->irq_cnt - 1; i >= 0; i--)
5193 napi_disable(&tp->napi[i].napi);
5196 static void tg3_napi_enable(struct tg3 *tp)
5200 for (i = 0; i < tp->irq_cnt; i++)
5201 napi_enable(&tp->napi[i].napi);
5204 static void tg3_napi_init(struct tg3 *tp)
5208 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5209 for (i = 1; i < tp->irq_cnt; i++)
5210 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5213 static void tg3_napi_fini(struct tg3 *tp)
5217 for (i = 0; i < tp->irq_cnt; i++)
5218 netif_napi_del(&tp->napi[i].napi);
5221 static inline void tg3_netif_stop(struct tg3 *tp)
5223 tp->dev->trans_start = jiffies; /* prevent tx timeout */
5224 tg3_napi_disable(tp);
5225 netif_tx_disable(tp->dev);
5228 static inline void tg3_netif_start(struct tg3 *tp)
5230 /* NOTE: unconditional netif_tx_wake_all_queues is only
5231 * appropriate so long as all callers are assured to
5232 * have free tx slots (such as after tg3_init_hw)
5234 netif_tx_wake_all_queues(tp->dev);
5236 tg3_napi_enable(tp);
5237 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
5238 tg3_enable_ints(tp);
5241 static void tg3_irq_quiesce(struct tg3 *tp)
5245 BUG_ON(tp->irq_sync);
5250 for (i = 0; i < tp->irq_cnt; i++)
5251 synchronize_irq(tp->napi[i].irq_vec);
5254 /* Fully shut down all tg3 driver activity elsewhere in the system.
5255 * If irq_sync is non-zero, then the IRQ handlers must be synchronized
5256 * as well. Most of the time this is not necessary, except when
5257 * shutting down the device.
5259 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5261 spin_lock_bh(&tp->lock);
5263 tg3_irq_quiesce(tp);
5266 static inline void tg3_full_unlock(struct tg3 *tp)
5268 spin_unlock_bh(&tp->lock);
5271 /* One-shot MSI handler - Chip automatically disables interrupt
5272 * after sending MSI so driver doesn't have to do it.
5274 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5276 struct tg3_napi *tnapi = dev_id;
5277 struct tg3 *tp = tnapi->tp;
5279 prefetch(tnapi->hw_status);
5281 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5283 if (likely(!tg3_irq_sync(tp)))
5284 napi_schedule(&tnapi->napi);
5289 /* MSI ISR - No need to check for interrupt sharing and no need to
5290 * flush status block and interrupt mailbox. PCI ordering rules
5291 * guarantee that MSI will arrive after the status block.
5293 static irqreturn_t tg3_msi(int irq, void *dev_id)
5295 struct tg3_napi *tnapi = dev_id;
5296 struct tg3 *tp = tnapi->tp;
5298 prefetch(tnapi->hw_status);
5300 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5302 * Writing any value to intr-mbox-0 clears PCI INTA# and
5303 * chip-internal interrupt pending events.
5304 * Writing non-zero to intr-mbox-0 additionally tells the
5305 * NIC to stop sending us irqs, engaging "in-intr-handler"
5308 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5309 if (likely(!tg3_irq_sync(tp)))
5310 napi_schedule(&tnapi->napi);
5312 return IRQ_RETVAL(1);
5315 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5317 struct tg3_napi *tnapi = dev_id;
5318 struct tg3 *tp = tnapi->tp;
5319 struct tg3_hw_status *sblk = tnapi->hw_status;
5320 unsigned int handled = 1;
5322 /* In INTx mode, it is possible for the interrupt to arrive at
5323 * the CPU before the status block posted prior to the interrupt is visible.
5324 * Reading the PCI State register will confirm whether the
5325 * interrupt is ours and will flush the status block.
5327 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5328 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5329 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5336 * Writing any value to intr-mbox-0 clears PCI INTA# and
5337 * chip-internal interrupt pending events.
5338 * Writing non-zero to intr-mbox-0 additionally tells the
5339 * NIC to stop sending us irqs, engaging "in-intr-handler"
5342 * Flush the mailbox to de-assert the IRQ immediately to prevent
5343 * spurious interrupts. The flush impacts performance but
5344 * excessive spurious interrupts can be worse in some cases.
5346 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5347 if (tg3_irq_sync(tp))
5349 sblk->status &= ~SD_STATUS_UPDATED;
5350 if (likely(tg3_has_work(tnapi))) {
5351 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5352 napi_schedule(&tnapi->napi);
5354 /* No work; a shared interrupt, perhaps? Re-enable
5355 * interrupts, and flush that PCI write
5357 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5361 return IRQ_RETVAL(handled);
5364 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5366 struct tg3_napi *tnapi = dev_id;
5367 struct tg3 *tp = tnapi->tp;
5368 struct tg3_hw_status *sblk = tnapi->hw_status;
5369 unsigned int handled = 1;
5371 /* In INTx mode, it is possible for the interrupt to arrive at
5372 * the CPU before the status block posted prior to the interrupt is visible.
5373 * Reading the PCI State register will confirm whether the
5374 * interrupt is ours and will flush the status block.
5376 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5377 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5378 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5385 * Writing any value to intr-mbox-0 clears PCI INTA# and
5386 * chip-internal interrupt pending events.
5387 * Writing non-zero to intr-mbox-0 additionally tells the
5388 * NIC to stop sending us irqs, engaging "in-intr-handler"
5391 * Flush the mailbox to de-assert the IRQ immediately to prevent
5392 * spurious interrupts. The flush impacts performance but
5393 * excessive spurious interrupts can be worse in some cases.
5395 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5398 * In a shared interrupt configuration, sometimes other devices'
5399 * interrupts will scream. We record the current status tag here
5400 * so that the above check can report that the screaming interrupts
5401 * are unhandled. Eventually they will be silenced.
5403 tnapi->last_irq_tag = sblk->status_tag;
5405 if (tg3_irq_sync(tp))
5408 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5410 napi_schedule(&tnapi->napi);
5413 return IRQ_RETVAL(handled);
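/*
 * Illustrative sketch (standalone, not driver code): the screaming-
 * interrupt filter described above.  If the status tag has not advanced
 * since the last IRQ, the interrupt was not ours and is reported as
 * unhandled, letting the kernel eventually silence a screaming shared
 * line.  Error-path details of the real handler are omitted.
 */
#include <stdint.h>

static int ex_irq_is_ours(uint32_t status_tag, uint32_t *last_irq_tag)
{
	if (status_tag == *last_irq_tag)
		return 0;		/* no new status: report unhandled */
	*last_irq_tag = status_tag;	/* remember what we have seen */
	return 1;			/* ours: go schedule NAPI */
}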
5416 /* ISR for interrupt test */
5417 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5419 struct tg3_napi *tnapi = dev_id;
5420 struct tg3 *tp = tnapi->tp;
5421 struct tg3_hw_status *sblk = tnapi->hw_status;
5423 if ((sblk->status & SD_STATUS_UPDATED) ||
5424 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5425 tg3_disable_ints(tp);
5426 return IRQ_RETVAL(1);
5428 return IRQ_RETVAL(0);
5431 static int tg3_init_hw(struct tg3 *, int);
5432 static int tg3_halt(struct tg3 *, int, int);
5434 /* Restart hardware after configuration changes, self-test, etc.
5435 * Invoked with tp->lock held.
5437 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5438 __releases(tp->lock)
5439 __acquires(tp->lock)
5443 err = tg3_init_hw(tp, reset_phy);
5446 "Failed to re-initialize device, aborting\n");
5447 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5448 tg3_full_unlock(tp);
5449 del_timer_sync(&tp->timer);
5451 tg3_napi_enable(tp);
5453 tg3_full_lock(tp, 0);
5458 #ifdef CONFIG_NET_POLL_CONTROLLER
5459 static void tg3_poll_controller(struct net_device *dev)
5462 struct tg3 *tp = netdev_priv(dev);
5464 for (i = 0; i < tp->irq_cnt; i++)
5465 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5469 static void tg3_reset_task(struct work_struct *work)
5471 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5473 unsigned int restart_timer;
5475 tg3_full_lock(tp, 0);
5477 if (!netif_running(tp->dev)) {
5478 tg3_full_unlock(tp);
5482 tg3_full_unlock(tp);
5488 tg3_full_lock(tp, 1);
5490 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5491 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5493 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5494 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5495 tp->write32_rx_mbox = tg3_write_flush_reg32;
5496 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5497 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5500 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5501 err = tg3_init_hw(tp, 1);
5505 tg3_netif_start(tp);
5508 mod_timer(&tp->timer, jiffies + 1);
5511 tg3_full_unlock(tp);
5517 static void tg3_dump_short_state(struct tg3 *tp)
5519 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5520 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5521 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5522 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5525 static void tg3_tx_timeout(struct net_device *dev)
5527 struct tg3 *tp = netdev_priv(dev);
5529 if (netif_msg_tx_err(tp)) {
5530 netdev_err(dev, "transmit timed out, resetting\n");
5531 tg3_dump_short_state(tp);
5534 schedule_work(&tp->reset_task);
5537 /* Test for DMA buffers crossing any 4GB boundary: 4G, 8G, etc. */
5538 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5540 u32 base = (u32) mapping & 0xffffffff;
5542 return (base > 0xffffdcc0) && (base + len + 8 < base);
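/*
 * Illustrative check (standalone, not driver code): the 4GB-boundary test
 * above in action.  "base + len + 8" overflowing 32 bits means the buffer
 * straddles a 4GB line; the 0xffffdcc0 pre-filter merely skips the
 * addition for buffers that end well below the boundary.
 */
#include <assert.h>
#include <stdint.h>

static int ex_crosses_4g(uint64_t mapping, int len)
{
	uint32_t base = (uint32_t)(mapping & 0xffffffffu);

	return (base > 0xffffdcc0u) && (base + (uint32_t)len + 8u < base);
}

static void ex_crosses_4g_selftest(void)
{
	assert(ex_crosses_4g(0xfffffff0ULL, 64));	/* wraps past 4GB */
	assert(!ex_crosses_4g(0x00001000ULL, 64));	/* comfortably below */
}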
5545 /* Test for DMA addresses > 40-bit */
5546 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5549 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5550 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5551 return ((u64) mapping + len) > DMA_BIT_MASK(40);
5558 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5560 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5561 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5562 struct sk_buff *skb, u32 last_plus_one,
5563 u32 *start, u32 base_flags, u32 mss)
5565 struct tg3 *tp = tnapi->tp;
5566 struct sk_buff *new_skb;
5567 dma_addr_t new_addr = 0;
5571 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5572 new_skb = skb_copy(skb, GFP_ATOMIC);
5574 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5576 new_skb = skb_copy_expand(skb,
5577 skb_headroom(skb) + more_headroom,
5578 skb_tailroom(skb), GFP_ATOMIC);
5584 /* New SKB is guaranteed to be linear. */
5586 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5588 /* Make sure the mapping succeeded */
5589 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5591 dev_kfree_skb(new_skb);
5594 /* Make sure new skb does not cross any 4G boundaries.
5595 * Drop the packet if it does.
5597 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5598 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5599 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5602 dev_kfree_skb(new_skb);
5605 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5606 base_flags, 1 | (mss << 1));
5607 *start = NEXT_TX(entry);
5611 /* Now clean up the sw ring entries. */
5613 while (entry != last_plus_one) {
5617 len = skb_headlen(skb);
5619 len = skb_shinfo(skb)->frags[i-1].size;
5621 pci_unmap_single(tp->pdev,
5622 dma_unmap_addr(&tnapi->tx_buffers[entry],
5624 len, PCI_DMA_TODEVICE);
5626 tnapi->tx_buffers[entry].skb = new_skb;
5627 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5630 tnapi->tx_buffers[entry].skb = NULL;
5632 entry = NEXT_TX(entry);
5641 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5642 dma_addr_t mapping, int len, u32 flags,
5645 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5646 int is_end = (mss_and_is_end & 0x1);
5647 u32 mss = (mss_and_is_end >> 1);
5651 flags |= TXD_FLAG_END;
5652 if (flags & TXD_FLAG_VLAN) {
5653 vlan_tag = flags >> 16;
5656 vlan_tag |= (mss << TXD_MSS_SHIFT);
5658 txd->addr_hi = ((u64) mapping >> 32);
5659 txd->addr_lo = ((u64) mapping & 0xffffffff);
5660 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5661 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
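/*
 * Illustrative sketch (standalone, not driver code): the mss_and_is_end
 * packing used at every tg3_set_txd() call site above and below.  Bit 0
 * carries the end-of-packet flag and the MSS rides in the bits above it,
 * so one u32 parameter covers both; the asserts mirror how the first
 * lines of tg3_set_txd() decode it.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ex_pack_mss_is_end(uint32_t mss, int is_end)
{
	return (uint32_t)(is_end != 0) | (mss << 1);
}

static void ex_pack_selftest(void)
{
	uint32_t v = ex_pack_mss_is_end(1460, 1);

	assert((v & 0x1) == 1);		/* is_end, as tg3_set_txd reads it */
	assert((v >> 1) == 1460);	/* mss */
}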
5664 /* hard_start_xmit for devices that don't have any bugs and
5665 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5667 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5668 struct net_device *dev)
5670 struct tg3 *tp = netdev_priv(dev);
5671 u32 len, entry, base_flags, mss;
5673 struct tg3_napi *tnapi;
5674 struct netdev_queue *txq;
5675 unsigned int i, last;
5677 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5678 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5679 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5682 /* We are running in BH disabled context with netif_tx_lock
5683 * and TX reclaim runs via tp->napi.poll inside of a software
5684 * interrupt. Furthermore, IRQ processing runs lockless so we have
5685 * no IRQ context deadlocks to worry about either. Rejoice!
5687 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5688 if (!netif_tx_queue_stopped(txq)) {
5689 netif_tx_stop_queue(txq);
5691 /* This is a hard error, log it. */
5693 "BUG! Tx Ring full when queue awake!\n");
5695 return NETDEV_TX_BUSY;
5698 entry = tnapi->tx_prod;
5700 mss = skb_shinfo(skb)->gso_size;
5702 int tcp_opt_len, ip_tcp_len;
5705 if (skb_header_cloned(skb) &&
5706 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5711 if (skb_is_gso_v6(skb)) {
5712 hdrlen = skb_headlen(skb) - ETH_HLEN;
5714 struct iphdr *iph = ip_hdr(skb);
5716 tcp_opt_len = tcp_optlen(skb);
5717 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5720 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5721 hdrlen = ip_tcp_len + tcp_opt_len;
5724 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5725 mss |= (hdrlen & 0xc) << 12;
5727 base_flags |= 0x00000010;
5728 base_flags |= (hdrlen & 0x3e0) << 5;
5732 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5733 TXD_FLAG_CPU_POST_DMA);
5735 tcp_hdr(skb)->check = 0;
5737 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5738 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5741 if (vlan_tx_tag_present(skb))
5742 base_flags |= (TXD_FLAG_VLAN |
5743 (vlan_tx_tag_get(skb) << 16));
5745 len = skb_headlen(skb);
5747 /* Queue skb data, a.k.a. the main skb fragment. */
5748 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5749 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5754 tnapi->tx_buffers[entry].skb = skb;
5755 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5757 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5758 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5759 base_flags |= TXD_FLAG_JMB_PKT;
5761 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5762 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5764 entry = NEXT_TX(entry);
5766 /* Now loop through additional data fragments, and queue them. */
5767 if (skb_shinfo(skb)->nr_frags > 0) {
5768 last = skb_shinfo(skb)->nr_frags - 1;
5769 for (i = 0; i <= last; i++) {
5770 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5773 mapping = pci_map_page(tp->pdev,
5776 len, PCI_DMA_TODEVICE);
5777 if (pci_dma_mapping_error(tp->pdev, mapping))
5780 tnapi->tx_buffers[entry].skb = NULL;
5781 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5784 tg3_set_txd(tnapi, entry, mapping, len,
5785 base_flags, (i == last) | (mss << 1));
5787 entry = NEXT_TX(entry);
5791 /* Packets are ready, update Tx producer idx local and on card. */
5792 tw32_tx_mbox(tnapi->prodmbox, entry);
5794 tnapi->tx_prod = entry;
5795 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5796 netif_tx_stop_queue(txq);
5798 /* netif_tx_stop_queue() must be done before checking
5799 * tx index in tg3_tx_avail() below, because in
5800 * tg3_tx(), we update tx index before checking for
5801 * netif_tx_queue_stopped().
5804 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5805 netif_tx_wake_queue(txq);
5811 return NETDEV_TX_OK;
5815 entry = tnapi->tx_prod;
5816 tnapi->tx_buffers[entry].skb = NULL;
5817 pci_unmap_single(tp->pdev,
5818 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5821 for (i = 0; i <= last; i++) {
5822 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5823 entry = NEXT_TX(entry);
5825 pci_unmap_page(tp->pdev,
5826 dma_unmap_addr(&tnapi->tx_buffers[entry],
5828 frag->size, PCI_DMA_TODEVICE);
5832 return NETDEV_TX_OK;
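/*
 * Illustrative sketch (standalone, not driver code): how the HW_TSO_3
 * path above scatters the TSO header length across two descriptor
 * fields.  Bits 2-3 of hdrlen land in the mss word and bits 5-9 in
 * base_flags, exactly as in the expressions in tg3_start_xmit(); bit 4
 * appears to be handled by a separate conditional not shown in full here.
 */
#include <stdint.h>

static void ex_tso3_pack(uint32_t hdrlen, uint32_t *mss, uint32_t *base_flags)
{
	*mss |= (hdrlen & 0xc) << 12;		/* hdrlen bits 2-3 */
	*base_flags |= (hdrlen & 0x3e0) << 5;	/* hdrlen bits 5-9 */
}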
5835 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5836 struct net_device *);
5838 /* Use GSO to work around a rare TSO bug that may be triggered when the
5839 * TSO header is greater than 80 bytes.
5841 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5843 struct sk_buff *segs, *nskb;
5844 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5846 /* Estimate the number of fragments in the worst case */
5847 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5848 netif_stop_queue(tp->dev);
5850 /* netif_tx_stop_queue() must be done before checking
5851 * tx index in tg3_tx_avail() below, because in
5852 * tg3_tx(), we update tx index before checking for
5853 * netif_tx_queue_stopped().
5856 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5857 return NETDEV_TX_BUSY;
5859 netif_wake_queue(tp->dev);
5862 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5864 goto tg3_tso_bug_end;
5870 tg3_start_xmit_dma_bug(nskb, tp->dev);
5876 return NETDEV_TX_OK;
5879 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5880 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5882 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5883 struct net_device *dev)
5885 struct tg3 *tp = netdev_priv(dev);
5886 u32 len, entry, base_flags, mss;
5887 int would_hit_hwbug;
5889 struct tg3_napi *tnapi;
5890 struct netdev_queue *txq;
5891 unsigned int i, last;
5893 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5894 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5895 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5898 /* We are running in BH disabled context with netif_tx_lock
5899 * and TX reclaim runs via tp->napi.poll inside of a software
5900 * interrupt. Furthermore, IRQ processing runs lockless so we have
5901 * no IRQ context deadlocks to worry about either. Rejoice!
5903 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5904 if (!netif_tx_queue_stopped(txq)) {
5905 netif_tx_stop_queue(txq);
5907 /* This is a hard error, log it. */
5909 "BUG! Tx Ring full when queue awake!\n");
5911 return NETDEV_TX_BUSY;
5914 entry = tnapi->tx_prod;
5916 if (skb->ip_summed == CHECKSUM_PARTIAL)
5917 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5919 mss = skb_shinfo(skb)->gso_size;
5922 u32 tcp_opt_len, hdr_len;
5924 if (skb_header_cloned(skb) &&
5925 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5931 tcp_opt_len = tcp_optlen(skb);
5933 if (skb_is_gso_v6(skb)) {
5934 hdr_len = skb_headlen(skb) - ETH_HLEN;
5938 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5939 hdr_len = ip_tcp_len + tcp_opt_len;
5942 iph->tot_len = htons(mss + hdr_len);
5945 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5946 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5947 return tg3_tso_bug(tp, skb);
5949 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5950 TXD_FLAG_CPU_POST_DMA);
5952 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5953 tcp_hdr(skb)->check = 0;
5954 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5956 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5961 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5962 mss |= (hdr_len & 0xc) << 12;
5964 base_flags |= 0x00000010;
5965 base_flags |= (hdr_len & 0x3e0) << 5;
5966 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5967 mss |= hdr_len << 9;
5968 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5969 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5970 if (tcp_opt_len || iph->ihl > 5) {
5973 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5974 mss |= (tsflags << 11);
5977 if (tcp_opt_len || iph->ihl > 5) {
5980 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5981 base_flags |= tsflags << 12;
5986 if (vlan_tx_tag_present(skb))
5987 base_flags |= (TXD_FLAG_VLAN |
5988 (vlan_tx_tag_get(skb) << 16));
5990 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5991 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5992 base_flags |= TXD_FLAG_JMB_PKT;
5994 len = skb_headlen(skb);
5996 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5997 if (pci_dma_mapping_error(tp->pdev, mapping)) {
6002 tnapi->tx_buffers[entry].skb = skb;
6003 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6005 would_hit_hwbug = 0;
6007 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
6008 would_hit_hwbug = 1;
6010 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6011 tg3_4g_overflow_test(mapping, len))
6012 would_hit_hwbug = 1;
6014 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6015 tg3_40bit_overflow_test(tp, mapping, len))
6016 would_hit_hwbug = 1;
6018 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
6019 would_hit_hwbug = 1;
6021 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
6022 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
6024 entry = NEXT_TX(entry);
6026 /* Now loop through additional data fragments, and queue them. */
6027 if (skb_shinfo(skb)->nr_frags > 0) {
6028 last = skb_shinfo(skb)->nr_frags - 1;
6029 for (i = 0; i <= last; i++) {
6030 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6033 mapping = pci_map_page(tp->pdev,
6036 len, PCI_DMA_TODEVICE);
6038 tnapi->tx_buffers[entry].skb = NULL;
6039 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6041 if (pci_dma_mapping_error(tp->pdev, mapping))
6044 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
6046 would_hit_hwbug = 1;
6048 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
6049 tg3_4g_overflow_test(mapping, len))
6050 would_hit_hwbug = 1;
6052 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
6053 tg3_40bit_overflow_test(tp, mapping, len))
6054 would_hit_hwbug = 1;
6056 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
6057 tg3_set_txd(tnapi, entry, mapping, len,
6058 base_flags, (i == last)|(mss << 1));
6060 tg3_set_txd(tnapi, entry, mapping, len,
6061 base_flags, (i == last));
6063 entry = NEXT_TX(entry);
6067 if (would_hit_hwbug) {
6068 u32 last_plus_one = entry;
6071 start = entry - 1 - skb_shinfo(skb)->nr_frags;
6072 start &= (TG3_TX_RING_SIZE - 1);
6074 /* If the workaround fails due to memory/mapping
6075 * failure, silently drop this packet.
6077 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
6078 &start, base_flags, mss))
6084 /* Packets are ready, update Tx producer idx local and on card. */
6085 tw32_tx_mbox(tnapi->prodmbox, entry);
6087 tnapi->tx_prod = entry;
6088 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6089 netif_tx_stop_queue(txq);
6091 /* netif_tx_stop_queue() must be done before checking
6092 * tx index in tg3_tx_avail() below, because in
6093 * tg3_tx(), we update tx index before checking for
6094 * netif_tx_queue_stopped().
6097 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6098 netif_tx_wake_queue(txq);
6104 return NETDEV_TX_OK;
6108 entry = tnapi->tx_prod;
6109 tnapi->tx_buffers[entry].skb = NULL;
6110 pci_unmap_single(tp->pdev,
6111 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
6114 for (i = 0; i <= last; i++) {
6115 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6116 entry = NEXT_TX(entry);
6118 pci_unmap_page(tp->pdev,
6119 dma_unmap_addr(&tnapi->tx_buffers[entry],
6121 frag->size, PCI_DMA_TODEVICE);
6125 return NETDEV_TX_OK;
6128 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
6133 if (new_mtu > ETH_DATA_LEN) {
6134 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6135 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
6136 ethtool_op_set_tso(dev, 0);
6138 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
6141 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6142 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
6143 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
6147 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
6149 struct tg3 *tp = netdev_priv(dev);
6152 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6155 if (!netif_running(dev)) {
6156 /* We'll just catch it later when the
6159 tg3_set_mtu(dev, tp, new_mtu);
6167 tg3_full_lock(tp, 1);
6169 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6171 tg3_set_mtu(dev, tp, new_mtu);
6173 err = tg3_restart_hw(tp, 0);
6176 tg3_netif_start(tp);
6178 tg3_full_unlock(tp);
6186 static void tg3_rx_prodring_free(struct tg3 *tp,
6187 struct tg3_rx_prodring_set *tpr)
6191 if (tpr != &tp->napi[0].prodring) {
6192 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6193 i = (i + 1) & tp->rx_std_ring_mask)
6194 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6197 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6198 for (i = tpr->rx_jmb_cons_idx;
6199 i != tpr->rx_jmb_prod_idx;
6200 i = (i + 1) & tp->rx_jmb_ring_mask) {
6201 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6209 for (i = 0; i <= tp->rx_std_ring_mask; i++)
6210 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6213 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6214 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6215 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
6216 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6221 /* Initialize rx rings for packet processing.
6223 * The chip has been shut down and the driver detached from
6224 * the networking stack, so no interrupts or new tx packets will
6225 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
6228 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6229 struct tg3_rx_prodring_set *tpr)
6231 u32 i, rx_pkt_dma_sz;
6233 tpr->rx_std_cons_idx = 0;
6234 tpr->rx_std_prod_idx = 0;
6235 tpr->rx_jmb_cons_idx = 0;
6236 tpr->rx_jmb_prod_idx = 0;
6238 if (tpr != &tp->napi[0].prodring) {
6239 memset(&tpr->rx_std_buffers[0], 0,
6240 TG3_RX_STD_BUFF_RING_SIZE(tp));
6241 if (tpr->rx_jmb_buffers)
6242 memset(&tpr->rx_jmb_buffers[0], 0,
6243 TG3_RX_JMB_BUFF_RING_SIZE(tp));
6247 /* Zero out all descriptors. */
6248 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
6250 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6251 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6252 tp->dev->mtu > ETH_DATA_LEN)
6253 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6254 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6256 /* Initialize invariants of the rings; we only set this
6257 * stuff once. This works because the card does not
6258 * write into the rx buffer posting rings.
6260 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
6261 struct tg3_rx_buffer_desc *rxd;
6263 rxd = &tpr->rx_std[i];
6264 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6265 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6266 rxd->opaque = (RXD_OPAQUE_RING_STD |
6267 (i << RXD_OPAQUE_INDEX_SHIFT));
6270 /* Now allocate fresh SKBs for each rx ring. */
6271 for (i = 0; i < tp->rx_pending; i++) {
6272 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6273 netdev_warn(tp->dev,
6274 "Using a smaller RX standard ring. Only "
6275 "%d out of %d buffers were allocated "
6276 "successfully\n", i, tp->rx_pending);
6284 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ||
6285 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
6288 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
6290 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6293 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
6294 struct tg3_rx_buffer_desc *rxd;
6296 rxd = &tpr->rx_jmb[i].std;
6297 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6298 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6300 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6301 (i << RXD_OPAQUE_INDEX_SHIFT));
6304 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6305 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6306 netdev_warn(tp->dev,
6307 "Using a smaller RX jumbo ring. Only %d "
6308 "out of %d buffers were allocated "
6309 "successfully\n", i, tp->rx_jumbo_pending);
6312 tp->rx_jumbo_pending = i;
6321 tg3_rx_prodring_free(tp, tpr);
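/*
 * Illustrative sketch (standalone, not driver code): the opaque-cookie
 * encoding the rings rely on.  Each posted descriptor carries its ring
 * identity and slot index, so a status-ring entry alone tells tg3_rx()
 * where the buffer came from.  The EX_* mask values are invented; the
 * real RXD_OPAQUE_* masks and shifts live in the driver's header.
 */
#include <stdint.h>

#define EX_RING_JUMBO	0x8000u		/* example: one bit selects the ring */
#define EX_INDEX_MASK	0x7fffu

static uint32_t ex_make_cookie(int jumbo, uint32_t idx)
{
	return (jumbo ? EX_RING_JUMBO : 0) | (idx & EX_INDEX_MASK);
}

static void ex_decode_cookie(uint32_t opaque, int *jumbo, uint32_t *idx)
{
	*jumbo = !!(opaque & EX_RING_JUMBO);
	*idx = opaque & EX_INDEX_MASK;
}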
6325 static void tg3_rx_prodring_fini(struct tg3 *tp,
6326 struct tg3_rx_prodring_set *tpr)
6328 kfree(tpr->rx_std_buffers);
6329 tpr->rx_std_buffers = NULL;
6330 kfree(tpr->rx_jmb_buffers);
6331 tpr->rx_jmb_buffers = NULL;
6333 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6334 tpr->rx_std, tpr->rx_std_mapping);
6338 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6339 tpr->rx_jmb, tpr->rx_jmb_mapping);
6344 static int tg3_rx_prodring_init(struct tg3 *tp,
6345 struct tg3_rx_prodring_set *tpr)
6347 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
6349 if (!tpr->rx_std_buffers)
6352 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6353 TG3_RX_STD_RING_BYTES(tp),
6354 &tpr->rx_std_mapping,
6359 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
6360 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
6361 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
6363 if (!tpr->rx_jmb_buffers)
6366 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6367 TG3_RX_JMB_RING_BYTES(tp),
6368 &tpr->rx_jmb_mapping,
6377 tg3_rx_prodring_fini(tp, tpr);
6381 /* Free up pending packets in all rx/tx rings.
6383 * The chip has been shut down and the driver detached from
6384 * the networking stack, so no interrupts or new tx packets will
6385 * end up in the driver. tp->{tx,}lock is not held and we are not
6386 * in an interrupt context and thus may sleep.
6388 static void tg3_free_rings(struct tg3 *tp)
6392 for (j = 0; j < tp->irq_cnt; j++) {
6393 struct tg3_napi *tnapi = &tp->napi[j];
6395 tg3_rx_prodring_free(tp, &tnapi->prodring);
6397 if (!tnapi->tx_buffers)
6400 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6401 struct ring_info *txp;
6402 struct sk_buff *skb;
6405 txp = &tnapi->tx_buffers[i];
6413 pci_unmap_single(tp->pdev,
6414 dma_unmap_addr(txp, mapping),
6421 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6422 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6423 pci_unmap_page(tp->pdev,
6424 dma_unmap_addr(txp, mapping),
6425 skb_shinfo(skb)->frags[k].size,
6430 dev_kfree_skb_any(skb);
6435 /* Initialize tx/rx rings for packet processing.
6437 * The chip has been shut down and the driver detached from
6438 * the networking stack, so no interrupts or new tx packets will
6439 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep.
6442 static int tg3_init_rings(struct tg3 *tp)
6446 /* Free up all the SKBs. */
6449 for (i = 0; i < tp->irq_cnt; i++) {
6450 struct tg3_napi *tnapi = &tp->napi[i];
6452 tnapi->last_tag = 0;
6453 tnapi->last_irq_tag = 0;
6454 tnapi->hw_status->status = 0;
6455 tnapi->hw_status->status_tag = 0;
6456 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6461 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6463 tnapi->rx_rcb_ptr = 0;
6465 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6467 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6477 * Must not be invoked with interrupt sources disabled and
6478 * the hardware shut down.
6480 static void tg3_free_consistent(struct tg3 *tp)
6484 for (i = 0; i < tp->irq_cnt; i++) {
6485 struct tg3_napi *tnapi = &tp->napi[i];
6487 if (tnapi->tx_ring) {
6488 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6489 tnapi->tx_ring, tnapi->tx_desc_mapping);
6490 tnapi->tx_ring = NULL;
6493 kfree(tnapi->tx_buffers);
6494 tnapi->tx_buffers = NULL;
6496 if (tnapi->rx_rcb) {
6497 dma_free_coherent(&tp->pdev->dev,
6498 TG3_RX_RCB_RING_BYTES(tp),
6500 tnapi->rx_rcb_mapping);
6501 tnapi->rx_rcb = NULL;
6504 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6506 if (tnapi->hw_status) {
6507 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6509 tnapi->status_mapping);
6510 tnapi->hw_status = NULL;
6515 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6516 tp->hw_stats, tp->stats_mapping);
6517 tp->hw_stats = NULL;
6522 * Must not be invoked with interrupt sources disabled and
6523 * the hardware shut down. Can sleep.
6525 static int tg3_alloc_consistent(struct tg3 *tp)
6529 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6530 sizeof(struct tg3_hw_stats),
6536 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6538 for (i = 0; i < tp->irq_cnt; i++) {
6539 struct tg3_napi *tnapi = &tp->napi[i];
6540 struct tg3_hw_status *sblk;
6542 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6544 &tnapi->status_mapping,
6546 if (!tnapi->hw_status)
6549 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6550 sblk = tnapi->hw_status;
6552 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
6555 /* If multivector TSS is enabled, vector 0 does not handle
6556 * tx interrupts. Don't allocate any resources for it.
6558 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6559 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6560 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6563 if (!tnapi->tx_buffers)
6566 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6568 &tnapi->tx_desc_mapping,
6570 if (!tnapi->tx_ring)
6575 * When RSS is enabled, the status block format changes
6576 * slightly. The "rx_jumbo_consumer", "reserved",
6577 * and "rx_mini_consumer" members get mapped to the
6578 * other three rx return ring producer indexes.
6582 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6585 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6588 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6591 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
6596 * If multivector RSS is enabled, vector 0 does not handle
6597 * rx or tx interrupts. Don't allocate any resources for it.
6599 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6602 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6603 TG3_RX_RCB_RING_BYTES(tp),
6604 &tnapi->rx_rcb_mapping,
6609 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6615 tg3_free_consistent(tp);
6619 #define MAX_WAIT_CNT 1000
6621 /* To stop a block, clear the enable bit and poll till it
6622 * clears. tp->lock is held.
6624 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6629 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6636 /* We can't enable/disable these bits of the
6637 * 5705/5750, just say success.
6650 for (i = 0; i < MAX_WAIT_CNT; i++) {
6653 if ((val & enable_bit) == 0)
6657 if (i == MAX_WAIT_CNT && !silent) {
6658 dev_err(&tp->pdev->dev,
6659 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6667 /* tp->lock is held. */
6668 static int tg3_abort_hw(struct tg3 *tp, int silent)
6672 tg3_disable_ints(tp);
6674 tp->rx_mode &= ~RX_MODE_ENABLE;
6675 tw32_f(MAC_RX_MODE, tp->rx_mode);
6678 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6679 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6680 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6681 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6682 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6683 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6685 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6686 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6687 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6688 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6689 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6690 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6691 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6693 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6694 tw32_f(MAC_MODE, tp->mac_mode);
6697 tp->tx_mode &= ~TX_MODE_ENABLE;
6698 tw32_f(MAC_TX_MODE, tp->tx_mode);
6700 for (i = 0; i < MAX_WAIT_CNT; i++) {
6702 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6705 if (i >= MAX_WAIT_CNT) {
6706 dev_err(&tp->pdev->dev,
6707 "%s timed out, TX_MODE_ENABLE will not clear "
6708 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6712 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6713 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6714 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6716 tw32(FTQ_RESET, 0xffffffff);
6717 tw32(FTQ_RESET, 0x00000000);
6719 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6720 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6722 for (i = 0; i < tp->irq_cnt; i++) {
6723 struct tg3_napi *tnapi = &tp->napi[i];
6724 if (tnapi->hw_status)
6725 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6728 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6733 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6738 /* NCSI does not support APE events */
6739 if (tp->tg3_flags3 & TG3_FLG3_APE_HAS_NCSI)
6742 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6743 if (apedata != APE_SEG_SIG_MAGIC)
6746 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6747 if (!(apedata & APE_FW_STATUS_READY))
6750 /* Wait for up to 1 millisecond for APE to service previous event. */
6751 for (i = 0; i < 10; i++) {
6752 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6755 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6757 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6758 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6759 event | APE_EVENT_STATUS_EVENT_PENDING);
6761 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6763 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6769 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6770 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6773 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6778 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6782 case RESET_KIND_INIT:
6783 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6784 APE_HOST_SEG_SIG_MAGIC);
6785 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6786 APE_HOST_SEG_LEN_MAGIC);
6787 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6788 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6789 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6790 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
6791 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6792 APE_HOST_BEHAV_NO_PHYLOCK);
6793 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
6794 TG3_APE_HOST_DRVR_STATE_START);
6796 event = APE_EVENT_STATUS_STATE_START;
6798 case RESET_KIND_SHUTDOWN:
6799 /* With the interface we are currently using,
6800 * APE does not track driver state. Wiping
6801 * out the HOST SEGMENT SIGNATURE forces
6802 * the APE to assume OS absent status.
6804 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6806 if (device_may_wakeup(&tp->pdev->dev) &&
6807 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)) {
6808 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
6809 TG3_APE_HOST_WOL_SPEED_AUTO);
6810 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
6812 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
6814 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
6816 event = APE_EVENT_STATUS_STATE_UNLOAD;
6818 case RESET_KIND_SUSPEND:
6819 event = APE_EVENT_STATUS_STATE_SUSPEND;
6825 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6827 tg3_ape_send_event(tp, event);
6830 /* tp->lock is held. */
6831 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6833 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6834 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6836 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6838 case RESET_KIND_INIT:
6839 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6843 case RESET_KIND_SHUTDOWN:
6844 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6848 case RESET_KIND_SUSPEND:
6849 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6858 if (kind == RESET_KIND_INIT ||
6859 kind == RESET_KIND_SUSPEND)
6860 tg3_ape_driver_state_change(tp, kind);
6863 /* tp->lock is held. */
6864 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6866 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6868 case RESET_KIND_INIT:
6869 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6870 DRV_STATE_START_DONE);
6873 case RESET_KIND_SHUTDOWN:
6874 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6875 DRV_STATE_UNLOAD_DONE);
6883 if (kind == RESET_KIND_SHUTDOWN)
6884 tg3_ape_driver_state_change(tp, kind);
6887 /* tp->lock is held. */
6888 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6890 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6892 case RESET_KIND_INIT:
6893 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6897 case RESET_KIND_SHUTDOWN:
6898 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6902 case RESET_KIND_SUSPEND:
6903 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6913 static int tg3_poll_fw(struct tg3 *tp)
6918 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6919 /* Wait up to 20ms for init done. */
6920 for (i = 0; i < 200; i++) {
6921 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6928 /* Wait for firmware initialization to complete. */
6929 for (i = 0; i < 100000; i++) {
6930 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6931 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6936 /* Chip might not be fitted with firmware. Some Sun onboard
6937 * parts are configured like that. So don't signal the timeout
6938 * of the above loop as an error, but do report the lack of
6939 * running firmware once.
6942 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6943 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6945 netdev_info(tp->dev, "No firmware running\n");
6948 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6949 /* The 57765 A0 needs a little more
6950 * time to do some important work.
6958 /* Save PCI command register before chip reset */
6959 static void tg3_save_pci_state(struct tg3 *tp)
6960 {
6961 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6962 }
6964 /* Restore PCI state after chip reset */
6965 static void tg3_restore_pci_state(struct tg3 *tp)
6966 {
6967 u32 val;
6969 /* Re-enable indirect register accesses. */
6970 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6971 tp->misc_host_ctrl);
6973 /* Set MAX PCI retry to zero. */
6974 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6975 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6976 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6977 val |= PCISTATE_RETRY_SAME_DMA;
6978 /* Allow reads and writes to the APE register and memory space. */
6979 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6980 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6981 PCISTATE_ALLOW_APE_SHMEM_WR |
6982 PCISTATE_ALLOW_APE_PSPACE_WR;
6983 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6985 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6987 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6988 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6989 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6990 else {
6991 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6992 tp->pci_cacheline_sz);
6993 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6994 tp->pci_lat_timer);
6995 }
6996 }
6998 /* Make sure PCI-X relaxed ordering bit is clear. */
6999 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7000 u16 pcix_cmd;
7002 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7003 &pcix_cmd);
7004 pcix_cmd &= ~PCI_X_CMD_ERO;
7005 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7006 pcix_cmd);
7007 }
7009 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
7011 /* Chip reset on 5780 will reset MSI enable bit,
7012 * so need to restore it.
7013 */
7014 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
7015 u16 ctrl;
7017 pci_read_config_word(tp->pdev,
7018 tp->msi_cap + PCI_MSI_FLAGS,
7019 &ctrl);
7020 pci_write_config_word(tp->pdev,
7021 tp->msi_cap + PCI_MSI_FLAGS,
7022 ctrl | PCI_MSI_FLAGS_ENABLE);
7023 val = tr32(MSGINT_MODE);
7024 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7025 }
7026 }
7027 }
7029 static void tg3_stop_fw(struct tg3 *);
7031 /* tp->lock is held. */
7032 static int tg3_chip_reset(struct tg3 *tp)
7033 {
7034 u32 val;
7035 void (*write_op)(struct tg3 *, u32, u32);
7036 int i, err;
7038 tg3_nvram_lock(tp);
7040 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7042 /* No matching tg3_nvram_unlock() after this because
7043 * chip reset below will undo the nvram lock.
7044 */
7045 tp->nvram_lock_cnt = 0;
7047 /* GRC_MISC_CFG core clock reset will clear the memory
7048 * enable bit in PCI register 4 and the MSI enable bit
7049 * on some chips, so we save relevant registers here.
7050 */
7051 tg3_save_pci_state(tp);
7053 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7054 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7055 tw32(GRC_FASTBOOT_PC, 0);
7057 /*
7058 * We must avoid the readl() that normally takes place.
7059 * It locks machines, causes machine checks, and other
7060 * fun things. So, temporarily disable the 5701
7061 * hardware workaround, while we do the reset.
7062 */
7063 write_op = tp->write32;
7064 if (write_op == tg3_write_flush_reg32)
7065 tp->write32 = tg3_write32;
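/* Editor's note: tp->write32 is a function pointer chosen at probe time;
 * tg3_write_flush_reg32() performs a read-back after each write to flush
 * posted writes (the 5701 workaround). Swapping in plain tg3_write32() for
 * the duration of the reset suppresses that read-back, since MMIO reads can
 * hang the chip mid-reset. The pattern in isolation (a sketch, not the
 * driver's exact code):
 */
#if 0	/* illustrative only */
	void (*saved_op)(struct tg3 *, u32, u32) = tp->write32;

	if (saved_op == tg3_write_flush_reg32)
		tp->write32 = tg3_write32;	/* no read-back during reset */
	/* ... issue the core-clock reset ... */
	tp->write32 = saved_op;			/* restore the workaround */
#endif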
7067 /* Prevent the irq handler from reading or writing PCI registers
7068 * during chip reset when the memory enable bit in the PCI command
7069 * register may be cleared. The chip does not generate interrupt
7070 * at this time, but the irq handler may still be called due to irq
7071 * sharing or irqpoll.
7072 */
7073 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
7074 for (i = 0; i < tp->irq_cnt; i++) {
7075 struct tg3_napi *tnapi = &tp->napi[i];
7076 if (tnapi->hw_status) {
7077 tnapi->hw_status->status = 0;
7078 tnapi->hw_status->status_tag = 0;
7079 }
7080 tnapi->last_tag = 0;
7081 tnapi->last_irq_tag = 0;
7082 }
7083 smp_mb();
7085 for (i = 0; i < tp->irq_cnt; i++)
7086 synchronize_irq(tp->napi[i].irq_vec);
7088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7089 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7090 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7091 }
7093 /* do the reset */
7094 val = GRC_MISC_CFG_CORECLK_RESET;
7096 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
7097 /* Force PCIe 1.0a mode */
7098 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7099 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
7100 tr32(TG3_PCIE_PHY_TSTCTL) ==
7101 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7102 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7104 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7105 tw32(GRC_MISC_CFG, (1 << 29));
7106 val |= (1 << 29);
7107 }
7108 }
7110 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7111 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7112 tw32(GRC_VCPU_EXT_CTRL,
7113 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7114 }
7116 /* Manage gphy power for all CPMU absent PCIe devices. */
7117 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7118 !(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7119 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7121 tw32(GRC_MISC_CFG, val);
7123 /* restore 5701 hardware bug workaround write method */
7124 tp->write32 = write_op;
7126 /* Unfortunately, we have to delay before the PCI read back.
7127 * Some 575X chips even will not respond to a PCI cfg access
7128 * when the reset command is given to the chip.
7130 * How do these hardware designers expect things to work
7131 * properly if the PCI write is posted for a long period
7132 * of time? It is always necessary to have some method by
7133 * which a register read back can occur to push the write
7134 * out which does the reset.
7136 * For most tg3 variants the trick below was working.
7137 * Give it 10ms.
7138 */
7139 mdelay(10);
7141 /* Flush PCI posted writes. The normal MMIO registers
7142 * are inaccessible at this time so this is the only
7143 * way to make this reliably (actually, this is no longer
7144 * the case, see above). I tried to use indirect
7145 * register read/write but this upset some 5701 variants.
7146 */
7147 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7149 udelay(120);
7151 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
7152 u16 val16;
7154 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7155 int i;
7156 u32 cfg_val;
7158 /* Wait for link training to complete. */
7159 for (i = 0; i < 5000; i++)
7160 udelay(100);
7162 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7163 pci_write_config_dword(tp->pdev, 0xc4,
7164 cfg_val | (1 << 15));
7165 }
7167 /* Clear the "no snoop" and "relaxed ordering" bits. */
7168 pci_read_config_word(tp->pdev,
7169 tp->pcie_cap + PCI_EXP_DEVCTL,
7170 &val16);
7171 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7172 PCI_EXP_DEVCTL_NOSNOOP_EN);
7173 /*
7174 * Older PCIe devices only support the 128 byte
7175 * MPS setting. Enforce the restriction.
7176 */
7177 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
7178 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7179 pci_write_config_word(tp->pdev,
7180 tp->pcie_cap + PCI_EXP_DEVCTL,
7181 val16);
7183 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7185 /* Clear error status */
7186 pci_write_config_word(tp->pdev,
7187 tp->pcie_cap + PCI_EXP_DEVSTA,
7188 PCI_EXP_DEVSTA_CED |
7189 PCI_EXP_DEVSTA_NFED |
7190 PCI_EXP_DEVSTA_FED |
7191 PCI_EXP_DEVSTA_URD);
7192 }
7194 tg3_restore_pci_state(tp);
7196 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7198 val = 0;
7199 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7200 val = tr32(MEMARB_MODE);
7201 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7203 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7204 tg3_stop_fw(tp);
7205 tw32(0x5000, 0x400);
7206 }
7208 tw32(GRC_MODE, tp->grc_mode);
7210 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7211 val = tr32(0xc4);
7213 tw32(0xc4, val | (1 << 15));
7214 }
7216 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7218 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7219 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7220 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7221 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7222 }
7224 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7225 tp->mac_mode = MAC_MODE_APE_TX_EN |
7226 MAC_MODE_APE_RX_EN |
7227 MAC_MODE_TDE_ENABLE;
7229 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7230 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7231 val = tp->mac_mode;
7232 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7233 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7234 val = tp->mac_mode;
7235 } else
7236 val = 0;
7238 tw32_f(MAC_MODE, val);
7239 udelay(40);
7241 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7243 err = tg3_poll_fw(tp);
7244 if (err)
7245 return err;
7247 tg3_mdio_start(tp);
7249 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7250 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7251 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7252 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
7253 val = tr32(0x7c00);
7255 tw32(0x7c00, val | (1 << 25));
7256 }
7258 /* Reprobe ASF enable state. */
7259 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7260 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7261 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7262 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7263 u32 nic_cfg;
7265 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7266 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7267 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7268 tp->last_event_jiffies = jiffies;
7269 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7270 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7271 }
7272 }
7274 return 0;
7275 }
7277 /* tp->lock is held. */
7278 static void tg3_stop_fw(struct tg3 *tp)
7279 {
7280 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7281 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7282 /* Wait for RX cpu to ACK the previous event. */
7283 tg3_wait_for_event_ack(tp);
7285 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7287 tg3_generate_fw_event(tp);
7289 /* Wait for RX cpu to ACK this event. */
7290 tg3_wait_for_event_ack(tp);
7291 }
7292 }
7294 /* tp->lock is held. */
7295 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7296 {
7297 int err;
7299 tg3_stop_fw(tp);
7301 tg3_write_sig_pre_reset(tp, kind);
7303 tg3_abort_hw(tp, silent);
7304 err = tg3_chip_reset(tp);
7306 __tg3_set_mac_addr(tp, 0);
7308 tg3_write_sig_legacy(tp, kind);
7309 tg3_write_sig_post_reset(tp, kind);
7311 if (err)
7312 return err;
7314 return 0;
7315 }
7317 #define RX_CPU_SCRATCH_BASE 0x30000
7318 #define RX_CPU_SCRATCH_SIZE 0x04000
7319 #define TX_CPU_SCRATCH_BASE 0x34000
7320 #define TX_CPU_SCRATCH_SIZE 0x04000
7322 /* tp->lock is held. */
7323 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7324 {
7325 int i;
7327 BUG_ON(offset == TX_CPU_BASE &&
7328 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7330 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7331 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7333 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7334 return 0;
7335 }
7336 if (offset == RX_CPU_BASE) {
7337 for (i = 0; i < 10000; i++) {
7338 tw32(offset + CPU_STATE, 0xffffffff);
7339 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7340 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7341 break;
7342 }
7344 tw32(offset + CPU_STATE, 0xffffffff);
7345 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7346 udelay(10);
7347 } else {
7348 for (i = 0; i < 10000; i++) {
7349 tw32(offset + CPU_STATE, 0xffffffff);
7350 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7351 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7352 break;
7353 }
7354 }
7356 if (i >= 10000) {
7357 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7358 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7359 return -ENODEV;
7360 }
7362 /* Clear firmware's nvram arbitration. */
7363 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7364 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7365 return 0;
7366 }
7368 struct fw_info {
7369 unsigned int fw_base;
7370 unsigned int fw_len;
7371 const __be32 *fw_data;
7372 };
7374 /* tp->lock is held. */
7375 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7376 int cpu_scratch_size, struct fw_info *info)
7377 {
7378 int err, lock_err, i;
7379 void (*write_op)(struct tg3 *, u32, u32);
7381 if (cpu_base == TX_CPU_BASE &&
7382 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7383 netdev_err(tp->dev,
7384 "%s: Trying to load TX cpu firmware which is 5705\n",
7385 __func__);
7386 return -EINVAL;
7387 }
7389 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7390 write_op = tg3_write_mem;
7391 else
7392 write_op = tg3_write_indirect_reg32;
7394 /* It is possible that bootcode is still loading at this point.
7395 * Get the nvram lock first before halting the cpu.
7396 */
7397 lock_err = tg3_nvram_lock(tp);
7398 err = tg3_halt_cpu(tp, cpu_base);
7399 if (!lock_err)
7400 tg3_nvram_unlock(tp);
7401 if (err)
7402 goto out;
7404 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7405 write_op(tp, cpu_scratch_base + i, 0);
7406 tw32(cpu_base + CPU_STATE, 0xffffffff);
7407 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7408 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7409 write_op(tp, (cpu_scratch_base +
7410 (info->fw_base & 0xffff) +
7411 (i * sizeof(u32))),
7412 be32_to_cpu(info->fw_data[i]));
7414 err = 0;
7416 out:
7417 return err;
7418 }
7420 /* tp->lock is held. */
7421 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7422 {
7423 struct fw_info info;
7424 const __be32 *fw_data;
7425 int err, i;
7427 fw_data = (void *)tp->fw->data;
7429 /* Firmware blob starts with version numbers, followed by
7430 start address and length. We are setting complete length.
7431 length = end_address_of_bss - start_address_of_text.
7432 Remainder is the blob to be loaded contiguously
7433 from start address. */
7435 info.fw_base = be32_to_cpu(fw_data[1]);
7436 info.fw_len = tp->fw->size - 12;
7437 info.fw_data = &fw_data[3];
7439 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7440 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7441 &info);
7442 if (err)
7443 return err;
7445 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7446 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7447 &info);
7448 if (err)
7449 return err;
7451 /* Now startup only the RX cpu. */
7452 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7453 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7455 for (i = 0; i < 5; i++) {
7456 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7457 break;
7458 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7459 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7460 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7461 udelay(1000);
7462 }
7463 if (i >= 5) {
7464 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7465 "should be %08x\n", __func__,
7466 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7467 return -ENODEV;
7468 }
7469 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7470 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7472 return 0;
7473 }
7475 /* 5705 needs a special version of the TSO firmware. */
7477 /* tp->lock is held. */
7478 static int tg3_load_tso_firmware(struct tg3 *tp)
7479 {
7480 struct fw_info info;
7481 const __be32 *fw_data;
7482 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7483 int err, i;
7485 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7486 return 0;
7488 fw_data = (void *)tp->fw->data;
7490 /* Firmware blob starts with version numbers, followed by
7491 start address and length. We are setting complete length.
7492 length = end_address_of_bss - start_address_of_text.
7493 Remainder is the blob to be loaded contiguously
7494 from start address. */
7496 info.fw_base = be32_to_cpu(fw_data[1]);
7497 cpu_scratch_size = tp->fw_len;
7498 info.fw_len = tp->fw->size - 12;
7499 info.fw_data = &fw_data[3];
7501 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7502 cpu_base = RX_CPU_BASE;
7503 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7504 } else {
7505 cpu_base = TX_CPU_BASE;
7506 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7507 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7508 }
7510 err = tg3_load_firmware_cpu(tp, cpu_base,
7511 cpu_scratch_base, cpu_scratch_size,
7512 &info);
7513 if (err)
7514 return err;
7516 /* Now startup the cpu. */
7517 tw32(cpu_base + CPU_STATE, 0xffffffff);
7518 tw32_f(cpu_base + CPU_PC, info.fw_base);
7520 for (i = 0; i < 5; i++) {
7521 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7522 break;
7523 tw32(cpu_base + CPU_STATE, 0xffffffff);
7524 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7525 tw32_f(cpu_base + CPU_PC, info.fw_base);
7526 udelay(1000);
7527 }
7528 if (i >= 5) {
7529 netdev_err(tp->dev,
7530 "%s fails to set CPU PC, is %08x should be %08x\n",
7531 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7532 return -ENODEV;
7533 }
7534 tw32(cpu_base + CPU_STATE, 0xffffffff);
7535 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7536 return 0;
7537 }
7540 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7541 {
7542 struct tg3 *tp = netdev_priv(dev);
7543 struct sockaddr *addr = p;
7544 int err = 0, skip_mac_1 = 0;
7546 if (!is_valid_ether_addr(addr->sa_data))
7547 return -EINVAL;
7549 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7551 if (!netif_running(dev))
7552 return 0;
7554 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7555 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7557 addr0_high = tr32(MAC_ADDR_0_HIGH);
7558 addr0_low = tr32(MAC_ADDR_0_LOW);
7559 addr1_high = tr32(MAC_ADDR_1_HIGH);
7560 addr1_low = tr32(MAC_ADDR_1_LOW);
7562 /* Skip MAC addr 1 if ASF is using it. */
7563 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7564 !(addr1_high == 0 && addr1_low == 0))
7565 skip_mac_1 = 1;
7566 }
7567 spin_lock_bh(&tp->lock);
7568 __tg3_set_mac_addr(tp, skip_mac_1);
7569 spin_unlock_bh(&tp->lock);
7571 return err;
7572 }
7574 /* tp->lock is held. */
7575 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7576 dma_addr_t mapping, u32 maxlen_flags,
7577 u32 nic_addr)
7578 {
7579 tg3_write_mem(tp,
7580 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7581 ((u64) mapping >> 32));
7582 tg3_write_mem(tp,
7583 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7584 ((u64) mapping & 0xffffffff));
7585 tg3_write_mem(tp,
7586 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7587 maxlen_flags);
7589 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7590 tg3_write_mem(tp,
7591 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7592 nic_addr);
7593 }
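/* Editor's note: a ring control block (BDINFO) in NIC SRAM is a small
 * fixed record whose fields tg3_set_bdinfo() fills at known offsets from
 * bdinfo_addr: the 64-bit host DMA address (written as HIGH then LOW
 * halves), the packed (ring size << 16) | flags word, and, on pre-5705
 * parts only, the ring's NIC SRAM address. Hedged usage sketch; the
 * tx_ring_dma variable is illustrative:
 */
#if 0	/* illustrative only */
	/* Point the first send ring's BDINFO at a host ring of
	 * TG3_TX_RING_SIZE descriptors with the usual SRAM shadow.
	 */
	tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB, tx_ring_dma,
		       TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
		       NIC_SRAM_TX_BUFFER_DESC);
#endif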
7595 static void __tg3_set_rx_mode(struct net_device *);
7596 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7597 {
7598 int i = 0;
7600 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7601 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7602 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7603 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7604 } else {
7605 tw32(HOSTCC_TXCOL_TICKS, 0);
7606 tw32(HOSTCC_TXMAX_FRAMES, 0);
7607 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7608 }
7610 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
7611 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7612 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7613 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7614 } else {
7615 tw32(HOSTCC_RXCOL_TICKS, 0);
7616 tw32(HOSTCC_RXMAX_FRAMES, 0);
7617 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7618 }
7620 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7621 u32 val = ec->stats_block_coalesce_usecs;
7623 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7624 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7626 if (!netif_carrier_ok(tp->dev))
7627 val = 0;
7629 tw32(HOSTCC_STAT_COAL_TICKS, val);
7630 }
7632 for (i = 0; i < tp->irq_cnt - 1; i++) {
7633 u32 reg;
7635 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7636 tw32(reg, ec->rx_coalesce_usecs);
7637 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7638 tw32(reg, ec->rx_max_coalesced_frames);
7639 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7640 tw32(reg, ec->rx_max_coalesced_frames_irq);
7642 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7643 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7644 tw32(reg, ec->tx_coalesce_usecs);
7645 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7646 tw32(reg, ec->tx_max_coalesced_frames);
7647 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7648 tw32(reg, ec->tx_max_coalesced_frames_irq);
7649 }
7650 }
7652 for (; i < tp->irq_max - 1; i++) {
7653 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7654 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7655 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7657 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7658 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7659 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7660 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7661 }
7662 }
7663 }
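/* Editor's note: the per-vector host-coalescing registers are laid out as
 * one 0x18-byte bank per MSI-X vector, so the register for vector (i + 1)
 * is the corresponding VEC1 register plus i * 0x18, exactly as the loops
 * above compute. A sketch of that address arithmetic (name illustrative):
 */
#if 0	/* illustrative only */
static u32 example_vec_reg(u32 vec1_reg, int i)
{
	return vec1_reg + i * 0x18;	/* bank stride is 0x18 bytes */
}
/* example_vec_reg(HOSTCC_RXCOL_TICKS_VEC1, 2) -> register for vector 3 */
#endif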
7665 /* tp->lock is held. */
7666 static void tg3_rings_reset(struct tg3 *tp)
7667 {
7668 int i;
7669 u32 stblk, txrcb, rxrcb, limit;
7670 struct tg3_napi *tnapi = &tp->napi[0];
7672 /* Disable all transmit rings but the first. */
7673 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7674 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7675 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7676 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7677 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
7678 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7679 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7680 else
7681 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7683 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7684 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7685 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7686 BDINFO_FLAGS_DISABLED);
7689 /* Disable all receive return rings but the first. */
7690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7691 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7692 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7693 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7694 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7695 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7696 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7697 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7698 else
7699 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7701 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7702 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7703 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7704 BDINFO_FLAGS_DISABLED);
7706 /* Disable interrupts */
7707 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7709 /* Zero mailbox registers. */
7710 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7711 for (i = 1; i < tp->irq_max; i++) {
7712 tp->napi[i].tx_prod = 0;
7713 tp->napi[i].tx_cons = 0;
7714 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7715 tw32_mailbox(tp->napi[i].prodmbox, 0);
7716 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7717 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7718 }
7719 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7720 tw32_mailbox(tp->napi[0].prodmbox, 0);
7721 } else {
7722 tp->napi[0].tx_prod = 0;
7723 tp->napi[0].tx_cons = 0;
7724 tw32_mailbox(tp->napi[0].prodmbox, 0);
7725 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7726 }
7728 /* Make sure the NIC-based send BD rings are disabled. */
7729 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7730 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7731 for (i = 0; i < 16; i++)
7732 tw32_tx_mbox(mbox + i * 8, 0);
7733 }
7735 txrcb = NIC_SRAM_SEND_RCB;
7736 rxrcb = NIC_SRAM_RCV_RET_RCB;
7738 /* Clear status block in ram. */
7739 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7741 /* Set status block DMA address */
7742 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7743 ((u64) tnapi->status_mapping >> 32));
7744 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7745 ((u64) tnapi->status_mapping & 0xffffffff));
7747 if (tnapi->tx_ring) {
7748 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7749 (TG3_TX_RING_SIZE <<
7750 BDINFO_FLAGS_MAXLEN_SHIFT),
7751 NIC_SRAM_TX_BUFFER_DESC);
7752 txrcb += TG3_BDINFO_SIZE;
7753 }
7755 if (tnapi->rx_rcb) {
7756 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7757 (tp->rx_ret_ring_mask + 1) <<
7758 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
7759 rxrcb += TG3_BDINFO_SIZE;
7760 }
7762 stblk = HOSTCC_STATBLCK_RING1;
7764 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7765 u64 mapping = (u64)tnapi->status_mapping;
7766 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7767 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7769 /* Clear status block in ram. */
7770 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7772 if (tnapi->tx_ring) {
7773 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7774 (TG3_TX_RING_SIZE <<
7775 BDINFO_FLAGS_MAXLEN_SHIFT),
7776 NIC_SRAM_TX_BUFFER_DESC);
7777 txrcb += TG3_BDINFO_SIZE;
7778 }
7779 if (tnapi->rx_rcb) {
7780 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7781 ((tp->rx_ret_ring_mask + 1) <<
7782 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7783 rxrcb += TG3_BDINFO_SIZE;
7784 }
7786 stblk += 8;
7787 }
7788 }
7789 /* tp->lock is held. */
7790 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7791 {
7792 u32 val, rdmac_mode;
7793 int i, err, limit;
7794 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7796 tg3_disable_ints(tp);
7798 tg3_stop_fw(tp);
7800 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7802 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7803 tg3_abort_hw(tp, 1);
7805 /* Enable MAC control of LPI */
7806 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7807 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7808 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7809 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7811 tw32_f(TG3_CPMU_EEE_CTRL,
7812 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7814 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7815 TG3_CPMU_EEEMD_LPI_IN_TX |
7816 TG3_CPMU_EEEMD_LPI_IN_RX |
7817 TG3_CPMU_EEEMD_EEE_ENABLE;
7819 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7820 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7822 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7823 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7825 tw32_f(TG3_CPMU_EEE_MODE, val);
7827 tw32_f(TG3_CPMU_EEE_DBTMR1,
7828 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7829 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7831 tw32_f(TG3_CPMU_EEE_DBTMR2,
7832 TG3_CPMU_DBTMR2_APE_TX_2047US |
7833 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7834 }
7836 if (reset_phy)
7837 tg3_phy_reset(tp);
7839 err = tg3_chip_reset(tp);
7840 if (err)
7841 return err;
7843 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7845 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7846 val = tr32(TG3_CPMU_CTRL);
7847 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7848 tw32(TG3_CPMU_CTRL, val);
7850 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7851 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7852 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7853 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7855 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7856 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7857 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7858 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7860 val = tr32(TG3_CPMU_HST_ACC);
7861 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7862 val |= CPMU_HST_ACC_MACCLK_6_25;
7863 tw32(TG3_CPMU_HST_ACC, val);
7864 }
7866 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7867 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7868 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7869 PCIE_PWR_MGMT_L1_THRESH_4MS;
7870 tw32(PCIE_PWR_MGMT_THRESH, val);
7872 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7873 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7875 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7877 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7878 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7879 }
7881 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7882 u32 grc_mode = tr32(GRC_MODE);
7884 /* Access the lower 1K of PL PCIE block registers. */
7885 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7886 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7888 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7889 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7890 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7892 tw32(GRC_MODE, grc_mode);
7893 }
7895 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7896 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7897 u32 grc_mode = tr32(GRC_MODE);
7899 /* Access the lower 1K of PL PCIE block registers. */
7900 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7901 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7903 val = tr32(TG3_PCIE_TLDLPL_PORT +
7904 TG3_PCIE_PL_LO_PHYCTL5);
7905 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7906 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7908 tw32(GRC_MODE, grc_mode);
7909 }
7911 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7912 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7913 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7914 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7915 }
7917 /* This works around an issue with Athlon chipsets on
7918 * B3 tigon3 silicon. This bit has no effect on any
7919 * other revision. But do not set this on PCI Express
7920 * chips and don't even touch the clocks if the CPMU is present.
7921 */
7922 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7923 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7924 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7925 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7926 }
7928 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7929 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7930 val = tr32(TG3PCI_PCISTATE);
7931 val |= PCISTATE_RETRY_SAME_DMA;
7932 tw32(TG3PCI_PCISTATE, val);
7933 }
7935 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7936 /* Allow reads and writes to the
7937 * APE register and memory space.
7938 */
7939 val = tr32(TG3PCI_PCISTATE);
7940 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7941 PCISTATE_ALLOW_APE_SHMEM_WR |
7942 PCISTATE_ALLOW_APE_PSPACE_WR;
7943 tw32(TG3PCI_PCISTATE, val);
7944 }
7946 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7947 /* Enable some hw fixes. */
7948 val = tr32(TG3PCI_MSI_DATA);
7949 val |= (1 << 26) | (1 << 28) | (1 << 29);
7950 tw32(TG3PCI_MSI_DATA, val);
7951 }
7953 /* Descriptor ring init may make accesses to the
7954 * NIC SRAM area to setup the TX descriptors, so we
7955 * can only do this after the hardware has been
7956 * successfully reset.
7957 */
7958 err = tg3_init_rings(tp);
7959 if (err)
7960 return err;
7962 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
7963 val = tr32(TG3PCI_DMA_RW_CTRL) &
7964 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7965 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7966 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7967 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7968 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7969 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7970 /* This value is determined during the probe time DMA
7971 * engine test, tg3_test_dma.
7972 */
7973 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7974 }
7976 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7977 GRC_MODE_4X_NIC_SEND_RINGS |
7978 GRC_MODE_NO_TX_PHDR_CSUM |
7979 GRC_MODE_NO_RX_PHDR_CSUM);
7980 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7982 /* Pseudo-header checksum is done by hardware logic and not
7983 * the offload processors, so make the chip do the pseudo-
7984 * header checksums on receive. For transmit it is more
7985 * convenient to do the pseudo-header checksum in software
7986 * as Linux does that on transmit for us in all cases.
7987 */
7988 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7990 tw32(GRC_MODE,
7991 tp->grc_mode |
7992 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7994 /* Setup the timer prescaler register. Clock is always 66 MHz. */
7995 val = tr32(GRC_MISC_CFG);
7996 val &= ~GRC_MISC_CFG_PRESCALAR_MASK;
7997 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7998 tw32(GRC_MISC_CFG, val);
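/* Editor's note: the prescaler divides the 66 MHz core clock by (N + 1),
 * so programming N = 65 yields a 1 MHz tick, i.e. one count per
 * microsecond (worked arithmetic, assuming the divide-by-N+1 behavior):
 *
 *	66,000,000 Hz / (65 + 1) = 1,000,000 Hz  ->  1 us per tick
 */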
8000 /* Initialize MBUF/DESC pool. */
8001 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8002 /* Do nothing. */
8003 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8004 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8005 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8006 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8008 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8009 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8010 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8011 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8012 int fw_len;
8014 fw_len = tp->fw_len;
8015 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8016 tw32(BUFMGR_MB_POOL_ADDR,
8017 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8018 tw32(BUFMGR_MB_POOL_SIZE,
8019 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8020 }
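/* Editor's note: the mbuf-pool carve-out above rounds the firmware length
 * up to a 128-byte (0x80) boundary with the usual power-of-two idiom
 * (len + align - 1) & ~(align - 1). A sketch with a worked value (helper
 * name illustrative):
 */
#if 0	/* illustrative only */
static u32 example_round_up_128(u32 len)
{
	return (len + (0x80 - 1)) & ~(0x80 - 1);	/* 0x1a4 -> 0x200 */
}
#endif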
8022 if (tp->dev->mtu <= ETH_DATA_LEN) {
8023 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8024 tp->bufmgr_config.mbuf_read_dma_low_water);
8025 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8026 tp->bufmgr_config.mbuf_mac_rx_low_water);
8027 tw32(BUFMGR_MB_HIGH_WATER,
8028 tp->bufmgr_config.mbuf_high_water);
8030 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8031 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8032 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8033 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8034 tw32(BUFMGR_MB_HIGH_WATER,
8035 tp->bufmgr_config.mbuf_high_water_jumbo);
8037 tw32(BUFMGR_DMA_LOW_WATER,
8038 tp->bufmgr_config.dma_low_water);
8039 tw32(BUFMGR_DMA_HIGH_WATER,
8040 tp->bufmgr_config.dma_high_water);
8042 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8044 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8045 tw32(BUFMGR_MODE, val);
8046 for (i = 0; i < 2000; i++) {
8047 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8048 break;
8049 udelay(10);
8050 }
8051 if (i >= 2000) {
8052 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8053 return -ENODEV;
8054 }
8056 /* Setup replenish threshold. */
8057 val = tp->rx_pending / 8;
8058 if (val == 0)
8059 val = 1;
8060 else if (val > tp->rx_std_max_post)
8061 val = tp->rx_std_max_post;
8062 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8063 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8064 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8066 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
8067 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
8068 }
8070 tw32(RCVBDI_STD_THRESH, val);
8072 /* Initialize TG3_BDINFO's at:
8073 * RCVDBDI_STD_BD: standard eth size rx ring
8074 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8075 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8076 *
8077 * like so:
8078 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8079 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8080 * ring attribute flags
8081 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8082 *
8083 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8084 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8085 *
8086 * The size of each ring is fixed in the firmware, but the location is
8087 * configurable.
8088 */
8089 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8090 ((u64) tpr->rx_std_mapping >> 32));
8091 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8092 ((u64) tpr->rx_std_mapping & 0xffffffff));
8093 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8094 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
8095 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8096 NIC_SRAM_RX_BUFFER_DESC);
8098 /* Disable the mini ring */
8099 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8100 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8101 BDINFO_FLAGS_DISABLED);
8103 /* Program the jumbo buffer descriptor ring control
8104 * blocks on those devices that have them.
8105 */
8106 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8107 ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
8108 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))) {
8109 /* Setup replenish threshold. */
8110 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
8112 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
8113 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8114 ((u64) tpr->rx_jmb_mapping >> 32));
8115 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8116 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8117 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8118 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
8119 BDINFO_FLAGS_USE_EXT_RECV);
8120 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
8121 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8122 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8123 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8124 } else {
8125 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8126 BDINFO_FLAGS_DISABLED);
8127 }
8128 }
8129 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
8130 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8131 val = RX_STD_MAX_SIZE_5705;
8132 else
8133 val = RX_STD_MAX_SIZE_5717;
8134 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8135 val |= (TG3_RX_STD_DMA_SZ << 2);
8136 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
8137 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8138 else
8139 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
8141 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8143 tpr->rx_std_prod_idx = tp->rx_pending;
8144 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8146 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
8147 tp->rx_jumbo_pending : 0;
8148 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8150 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
8151 tw32(STD_REPLENISH_LWM, 32);
8152 tw32(JMB_REPLENISH_LWM, 16);
8153 }
8155 tg3_rings_reset(tp);
8157 /* Initialize MAC address and backoff seed. */
8158 __tg3_set_mac_addr(tp, 0);
8160 /* MTU + ethernet header + FCS + optional VLAN tag */
8161 tw32(MAC_RX_MTU_SIZE,
8162 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8164 /* The slot time is changed by tg3_setup_phy if we
8165 * run at gigabit with half duplex.
8166 */
8167 tw32(MAC_TX_LENGTHS,
8168 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8169 (6 << TX_LENGTHS_IPG_SHIFT) |
8170 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
8172 /* Receive rules. */
8173 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8174 tw32(RCVLPC_CONFIG, 0x0181);
8176 /* Calculate RDMAC_MODE setting early, we need it to determine
8177 * the RCVLPC_STATE_ENABLE mask.
8178 */
8179 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8180 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8181 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8182 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8183 RDMAC_MODE_LNGREAD_ENAB);
8185 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8186 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8188 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8189 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8190 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8191 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8192 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8193 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8195 /* If statement applies to 5705 and 5750 PCI devices only */
8196 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8197 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8198 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8199 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8200 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8201 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8202 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8203 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8204 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8208 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8209 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8211 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8212 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8214 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8215 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8216 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8217 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8223 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8224 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8225 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8226 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8227 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8228 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8229 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8230 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8231 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8232 }
8233 tw32(TG3_RDMA_RSRVCTRL_REG,
8234 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8235 }
8237 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8238 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8239 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8240 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8241 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8242 }
8244 /* Receive/send statistics. */
8245 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8246 val = tr32(RCVLPC_STATS_ENABLE);
8247 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8248 tw32(RCVLPC_STATS_ENABLE, val);
8249 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8250 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8251 val = tr32(RCVLPC_STATS_ENABLE);
8252 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8253 tw32(RCVLPC_STATS_ENABLE, val);
8254 } else
8255 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8257 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8258 tw32(SNDDATAI_STATSENAB, 0xffffff);
8259 tw32(SNDDATAI_STATSCTRL,
8260 (SNDDATAI_SCTRL_ENABLE |
8261 SNDDATAI_SCTRL_FASTUPD));
8263 /* Setup host coalescing engine. */
8264 tw32(HOSTCC_MODE, 0);
8265 for (i = 0; i < 2000; i++) {
8266 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8267 break;
8268 udelay(10);
8269 }
8271 __tg3_set_coalesce(tp, &tp->coal);
8273 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8274 /* Status/statistics block address. See tg3_timer,
8275 * the tg3_periodic_fetch_stats call there, and
8276 * tg3_get_stats to see how this works for 5705/5750 chips.
8277 */
8278 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8279 ((u64) tp->stats_mapping >> 32));
8280 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8281 ((u64) tp->stats_mapping & 0xffffffff));
8282 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8284 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8286 /* Clear statistics and status block memory areas */
8287 for (i = NIC_SRAM_STATS_BLK;
8288 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8289 i += sizeof(u32)) {
8290 tg3_write_mem(tp, i, 0);
8291 }
8292 }
8295 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8297 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8298 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8299 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8300 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8302 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8303 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8304 /* reset to prevent losing 1st rx packet intermittently */
8305 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8306 udelay(10);
8307 }
8309 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8310 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8311 else
8312 tp->mac_mode = 0;
8313 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8314 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8315 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8316 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8317 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8318 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8319 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8320 udelay(40);
8322 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8323 * If TG3_FLG2_IS_NIC is zero, we should read the
8324 * register to preserve the GPIO settings for LOMs. The GPIOs,
8325 * whether used as inputs or outputs, are set by boot code after
8326 * reset.
8327 */
8328 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8329 u32 gpio_mask;
8331 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8332 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8333 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8335 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8336 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8337 GRC_LCLCTRL_GPIO_OUTPUT3;
8339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8340 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8342 tp->grc_local_ctrl &= ~gpio_mask;
8343 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8344 }
8345 /* GPIO1 must be driven high for eeprom write protect */
8346 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8347 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8348 GRC_LCLCTRL_GPIO_OUTPUT1);
8350 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8351 udelay(100);
8353 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
8354 tp->irq_cnt > 1) {
8355 val = tr32(MSGINT_MODE);
8356 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8357 tw32(MSGINT_MODE, val);
8358 }
8360 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8361 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8362 udelay(40);
8363 }
8365 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8366 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8367 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8368 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8369 WDMAC_MODE_LNGREAD_ENAB);
8371 /* If statement applies to 5705 and 5750 PCI devices only */
8372 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8373 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8374 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8375 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8376 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8377 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8378 /* nothing */
8379 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8380 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8381 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8382 val |= WDMAC_MODE_RX_ACCEL;
8383 }
8384 }
8386 /* Enable host coalescing bug fix */
8387 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8388 val |= WDMAC_MODE_STATUS_TAG_FIX;
8390 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8391 val |= WDMAC_MODE_BURST_ALL_DATA;
8393 tw32_f(WDMAC_MODE, val);
8394 udelay(40);
8396 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8397 u16 pcix_cmd;
8399 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8400 &pcix_cmd);
8401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8402 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8403 pcix_cmd |= PCI_X_CMD_READ_2K;
8404 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8405 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8406 pcix_cmd |= PCI_X_CMD_READ_2K;
8408 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8409 pcix_cmd);
8410 }
8412 tw32_f(RDMAC_MODE, rdmac_mode);
8413 udelay(40);
8415 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8416 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8417 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8419 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8420 tw32(SNDDATAC_MODE,
8421 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8422 else
8423 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8425 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8426 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8427 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8429 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8430 val |= RCVDBDI_MODE_LRG_RING_SZ;
8431 tw32(RCVDBDI_MODE, val);
8432 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8433 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8434 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8435 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8436 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8437 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8438 tw32(SNDBDI_MODE, val);
8439 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8441 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8442 err = tg3_load_5701_a0_firmware_fix(tp);
8443 if (err)
8444 return err;
8445 }
8447 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8448 err = tg3_load_tso_firmware(tp);
8449 if (err)
8450 return err;
8451 }
8453 tp->tx_mode = TX_MODE_ENABLE;
8454 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8456 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8457 tw32_f(MAC_TX_MODE, tp->tx_mode);
8458 udelay(100);
8460 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8461 u32 reg = MAC_RSS_INDIR_TBL_0;
8462 u8 *ent = (u8 *)&val;
8464 /* Setup the indirection table */
8465 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8466 int idx = i % sizeof(val);
8468 ent[idx] = i % (tp->irq_cnt - 1);
8469 if (idx == sizeof(val) - 1) {
8470 tw32(reg, val);
8471 reg += 4;
8472 }
8473 }
8475 /* Setup the "secret" hash key. */
8476 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8477 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8478 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8479 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8480 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8481 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8482 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8483 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8484 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8485 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8486 }
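/* Editor's note: the indirection table packs four one-byte entries into
 * each 32-bit register, which is why the loop above buffers bytes in 'val'
 * and flushes every sizeof(val) entries. Each entry steers one hash bucket
 * to one of the (tp->irq_cnt - 1) rx rings, round-robin. A self-contained
 * sketch of the same packing (names and parameters illustrative):
 */
#if 0	/* illustrative only */
static void example_fill_indir(u32 __iomem *tbl, int nbuckets, int nrings)
{
	u32 val = 0;
	int i;

	for (i = 0; i < nbuckets; i++) {
		int idx = i % 4;		/* byte lane within register */

		((u8 *)&val)[idx] = i % nrings;	/* round-robin ring id */
		if (idx == 3) {
			writel(val, tbl++);	/* flush 4 packed entries */
			val = 0;
		}
	}
}
#endif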
8488 tp->rx_mode = RX_MODE_ENABLE;
8489 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8490 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8492 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8493 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8494 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8495 RX_MODE_RSS_IPV6_HASH_EN |
8496 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8497 RX_MODE_RSS_IPV4_HASH_EN |
8498 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8500 tw32_f(MAC_RX_MODE, tp->rx_mode);
8501 udelay(10);
8503 tw32(MAC_LED_CTRL, tp->led_ctrl);
8505 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8506 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8507 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8508 udelay(10);
8509 }
8510 tw32_f(MAC_RX_MODE, tp->rx_mode);
8511 udelay(10);
8513 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8514 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8515 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8516 /* Set drive transmission level to 1.2V */
8517 /* only if the signal pre-emphasis bit is not set */
8518 val = tr32(MAC_SERDES_CFG);
8519 val &= 0xfffff000;
8520 val |= 0x880;
8521 tw32(MAC_SERDES_CFG, val);
8523 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8524 tw32(MAC_SERDES_CFG, 0x616000);
8525 }
8527 /* Prevent chip from dropping frames when flow control
8528 * is enabled.
8529 */
8530 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8531 val = 1;
8532 else
8533 val = 2;
8534 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8536 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8537 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8538 /* Use hardware link auto-negotiation */
8539 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8540 }
8542 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8543 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8544 u32 tmp;
8546 tmp = tr32(SERDES_RX_CTRL);
8547 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8548 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8549 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8550 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8551 }
8553 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8554 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8555 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8556 tp->link_config.speed = tp->link_config.orig_speed;
8557 tp->link_config.duplex = tp->link_config.orig_duplex;
8558 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8559 }
8561 err = tg3_setup_phy(tp, 0);
8562 if (err)
8563 return err;
8565 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8566 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8569 /* Clear CRC stats. */
8570 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8571 tg3_writephy(tp, MII_TG3_TEST1,
8572 tmp | MII_TG3_TEST1_CRC_EN);
8573 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8574 }
8575 }
8578 __tg3_set_rx_mode(tp->dev);
8580 /* Initialize receive rules. */
8581 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8582 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8583 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8584 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8586 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8587 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8588 limit = 8;
8589 else
8590 limit = 16;
8591 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8592 limit -= 4;
8593 switch (limit) {
8594 case 16:
8595 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8596 case 15:
8597 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8598 case 14:
8599 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8600 case 13:
8601 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8602 case 12:
8603 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8604 case 11:
8605 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8606 case 10:
8607 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8608 case 9:
8609 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8610 case 8:
8611 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8612 case 7:
8613 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8614 case 6:
8615 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8616 case 5:
8617 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8618 case 4:
8619 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8620 case 3:
8621 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8622 case 2:
8623 case 1:
8624 default:
8625 break;
8626 }
8629 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8630 /* Write our heartbeat update interval to APE. */
8631 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8632 APE_HOST_HEARTBEAT_INT_DISABLE);
8634 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8636 return 0;
8637 }
8639 /* Called at device open time to get the chip ready for
8640 * packet processing. Invoked with tp->lock held.
8641 */
8642 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8643 {
8644 tg3_switch_clocks(tp);
8646 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8648 return tg3_reset_hw(tp, reset_phy);
8649 }
8651 #define TG3_STAT_ADD32(PSTAT, REG) \
8652 do { u32 __val = tr32(REG); \
8653 (PSTAT)->low += __val; \
8654 if ((PSTAT)->low < __val) \
8655 (PSTAT)->high += 1; \
8656 } while (0)
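/* Editor's note: the hardware statistics counters are 32 bits wide and
 * wrap; the macro above widens them to 64 bits by detecting wrap-around.
 * Unsigned overflow makes (low += val) end up smaller than val exactly
 * when the 32-bit sum wrapped, so the high word is incremented then.
 * Worked example:
 *
 *	low = 0xffffff00, val = 0x200:
 *	low += val  ->  0x00000100  (now < val, so carry into high)
 */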
8658 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8659 {
8660 struct tg3_hw_stats *sp = tp->hw_stats;
8662 if (!netif_carrier_ok(tp->dev))
8663 return;
8665 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8666 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8667 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8668 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8669 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8670 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8671 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8672 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8673 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8674 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8675 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8676 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8677 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8679 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8680 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8681 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8682 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8683 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8684 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8685 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8686 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8687 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8688 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8689 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8690 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8691 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8692 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8694 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8695 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8696 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8699 static void tg3_timer(unsigned long __opaque)
8700 {
8701 struct tg3 *tp = (struct tg3 *) __opaque;
8703 if (tp->irq_sync)
8704 goto restart_timer;
8706 spin_lock(&tp->lock);
8708 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8709 /* All of this garbage is because, when using non-tagged
8710 * IRQ status, the mailbox/status_block protocol the chip
8711 * uses with the cpu is race prone.
8712 */
8713 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8714 tw32(GRC_LOCAL_CTRL,
8715 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8716 } else {
8717 tw32(HOSTCC_MODE, tp->coalesce_mode |
8718 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8719 }
8721 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8722 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8723 spin_unlock(&tp->lock);
8724 schedule_work(&tp->reset_task);
8725 return;
8726 }
8727 }
8729 /* This part only runs once per second. */
8730 if (!--tp->timer_counter) {
8731 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8732 tg3_periodic_fetch_stats(tp);
8734 if (tp->setlpicnt && !--tp->setlpicnt) {
8735 u32 val = tr32(TG3_CPMU_EEE_MODE);
8736 tw32(TG3_CPMU_EEE_MODE,
8737 val | TG3_CPMU_EEEMD_LPI_ENABLE);
8738 }
8740 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8741 u32 mac_stat;
8742 int phy_event;
8744 mac_stat = tr32(MAC_STATUS);
8746 phy_event = 0;
8747 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
8748 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8749 phy_event = 1;
8750 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8751 phy_event = 1;
8753 if (phy_event)
8754 tg3_setup_phy(tp, 0);
8755 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8756 u32 mac_stat = tr32(MAC_STATUS);
8757 int need_setup = 0;
8759 if (netif_carrier_ok(tp->dev) &&
8760 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8761 need_setup = 1;
8762 }
8763 if (!netif_carrier_ok(tp->dev) &&
8764 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8765 MAC_STATUS_SIGNAL_DET))) {
8766 need_setup = 1;
8767 }
8768 if (need_setup) {
8769 if (!tp->serdes_counter) {
8770 tw32_f(MAC_MODE,
8771 (tp->mac_mode &
8772 ~MAC_MODE_PORT_MODE_MASK));
8773 udelay(40);
8774 tw32_f(MAC_MODE, tp->mac_mode);
8775 udelay(40);
8776 }
8777 tg3_setup_phy(tp, 0);
8778 }
8779 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8780 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8781 tg3_serdes_parallel_detect(tp);
8784 tp->timer_counter = tp->timer_multiplier;
8785 }
8787 /* Heartbeat is only sent once every 2 seconds.
8789 * The heartbeat is to tell the ASF firmware that the host
8790 * driver is still alive. In the event that the OS crashes,
8791 * ASF needs to reset the hardware to free up the FIFO space
8792 * that may be filled with rx packets destined for the host.
8793 * If the FIFO is full, ASF will no longer function properly.
8795 * Unintended resets have been reported on real time kernels
8796 * where the timer doesn't run on time. Netpoll will also have
8797 * the same problem.
8798 *
8799 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8800 * to check the ring condition when the heartbeat is expiring
8801 * before doing the reset. This will prevent most unintended
8802 * resets.
8803 */
8804 if (!--tp->asf_counter) {
8805 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8806 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8807 tg3_wait_for_event_ack(tp);
8809 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8810 FWCMD_NICDRV_ALIVE3);
8811 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8812 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8813 TG3_FW_UPDATE_TIMEOUT_SEC);
8815 tg3_generate_fw_event(tp);
8816 }
8817 tp->asf_counter = tp->asf_multiplier;
8818 }
8820 spin_unlock(&tp->lock);
8822 restart_timer:
8823 tp->timer.expires = jiffies + tp->timer_offset;
8824 add_timer(&tp->timer);
8825 }
8827 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8828 {
8829 irq_handler_t fn;
8830 unsigned long flags;
8831 char *name;
8832 struct tg3_napi *tnapi = &tp->napi[irq_num];
8834 if (tp->irq_cnt == 1)
8835 name = tp->dev->name;
8836 else {
8837 name = &tnapi->irq_lbl[0];
8838 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8839 name[IFNAMSIZ-1] = 0;
8840 }
8842 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8843 fn = tg3_msi;
8844 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8845 fn = tg3_msi_1shot;
8846 flags = IRQF_SAMPLE_RANDOM;
8847 } else {
8848 fn = tg3_interrupt;
8849 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8850 fn = tg3_interrupt_tagged;
8851 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8852 }
8854 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8855 }
8857 static int tg3_test_interrupt(struct tg3 *tp)
8859 struct tg3_napi *tnapi = &tp->napi[0];
8860 struct net_device *dev = tp->dev;
8861 int err, i, intr_ok = 0;
8864 if (!netif_running(dev))
8867 tg3_disable_ints(tp);
8869 free_irq(tnapi->irq_vec, tnapi);
8872 * Turn off MSI one shot mode. Otherwise this test has no
8873 * observable way to know whether the interrupt was delivered.
8875 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
8876 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8877 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8878 tw32(MSGINT_MODE, val);
8881 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8882 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8886 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8887 tg3_enable_ints(tp);
8889 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8892 for (i = 0; i < 5; i++) {
8893 u32 int_mbox, misc_host_ctrl;
8895 int_mbox = tr32_mailbox(tnapi->int_mbox);
8896 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8898 if ((int_mbox != 0) ||
8899 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8907 tg3_disable_ints(tp);
8909 free_irq(tnapi->irq_vec, tnapi);
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;

	if (intr_ok) {
		/* Reenable MSI one shot mode. */
		if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
		    (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
			tw32(MSGINT_MODE, val);
		}
		return 0;
	}

	return -EIO;
}
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
	int err;
	u16 pci_cmd;
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
		return 0;
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
8943 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8944 pci_write_config_word(tp->pdev, PCI_COMMAND,
8945 pci_cmd & ~PCI_COMMAND_SERR);
8947 err = tg3_test_interrupt(tp);
	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

	if (!err)
		return 0;

	/* other failures */
	if (err != -EIO)
		return err;
8958 /* MSI test failed, go back to INTx mode */
8959 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8960 "to INTx mode. Please report this failure to the PCI "
8961 "maintainer and include system chipset information\n");
8963 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8965 pci_disable_msi(tp->pdev);
8967 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8968 tp->napi[0].irq_vec = tp->pdev->irq;
	err = tg3_request_irq(tp, 0);
	if (err)
		return err;
8974 /* Need to reset the chip because the MSI cycle may have terminated
8975 * with Master Abort.
8977 tg3_full_lock(tp, 1);
8979 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8980 err = tg3_init_hw(tp, 1);
	tg3_full_unlock(tp);

	if (err)
		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

	return err;
}
static int tg3_request_firmware(struct tg3 *tp)
{
	const __be32 *fw_data;

	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
			   tp->fw_needed);
		return -ENOENT;
	}

	fw_data = (void *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
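	/* Header layout implied by the check below (a sketch, not from
	 * the firmware spec): fw_data[0] version, fw_data[1] start
	 * address, fw_data[2] full run-time length including BSS, all
	 * big-endian 32-bit words. The file carries the 12 header bytes
	 * plus at most fw_len bytes of initialized data, hence the
	 * fw->size - 12 sanity check.
	 */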
9007 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
9008 if (tp->fw_len < (tp->fw->size - 12)) {
9009 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9010 tp->fw_len, tp->fw_needed);
		release_firmware(tp->fw);
		tp->fw = NULL;
		return -EINVAL;
	}
9016 /* We no longer need firmware; we have it. */
	tp->fw_needed = NULL;
	return 0;
}
9021 static bool tg3_enable_msix(struct tg3 *tp)
9023 int i, rc, cpus = num_online_cpus();
9024 struct msix_entry msix_ent[tp->irq_max];
	if (cpus == 1)
		/* Just fallback to the simpler MSI mode. */
		return false;
	/*
	 * We want as many rx rings enabled as there are cpus.
	 * The first MSIX vector only deals with link interrupts, etc,
	 * so we add one to the number of vectors we are requesting.
	 */
9035 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
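	/* For example, with 4 online cpus and irq_max >= 5 this requests
	 * 5 vectors: vector 0 for link/error events and vectors 1-4 for
	 * the rx rings, per the comment above.
	 */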
9037 for (i = 0; i < tp->irq_max; i++) {
9038 msix_ent[i].entry = i;
9039 msix_ent[i].vector = 0;
	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
	if (rc < 0) {
		return false;
	} else if (rc != 0) {
		if (pci_enable_msix(tp->pdev, msix_ent, rc))
			return false;
		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
			      tp->irq_cnt, rc);
		tp->irq_cnt = rc;
	}
9053 for (i = 0; i < tp->irq_max; i++)
9054 tp->napi[i].irq_vec = msix_ent[i].vector;
9056 netif_set_real_num_tx_queues(tp->dev, 1);
9057 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9058 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
		pci_disable_msix(tp->pdev);
		return false;
	}
9063 if (tp->irq_cnt > 1) {
9064 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9065 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9066 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
		}
	}

	return true;
}
static void tg3_ints_init(struct tg3 *tp)
{
	if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
	    !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
		/* All MSI supporting chips should support tagged
		 * status.  Assert that this is the case.
		 */
		netdev_warn(tp->dev,
			    "MSI without TAGGED_STATUS? Not using MSI\n");
		goto defcfg;
	}
9086 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
9087 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
9088 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
9089 pci_enable_msi(tp->pdev) == 0)
9090 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
9092 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
9093 u32 msi_mode = tr32(MSGINT_MODE);
9094 if ((tp->tg3_flags2 & TG3_FLG2_USING_MSIX) &&
		    tp->irq_cnt > 1)
			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
	}
defcfg:
	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
		tp->irq_cnt = 1;
		tp->napi[0].irq_vec = tp->pdev->irq;
		netif_set_real_num_tx_queues(tp->dev, 1);
		netif_set_real_num_rx_queues(tp->dev, 1);
	}
}
static void tg3_ints_fini(struct tg3 *tp)
{
9110 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
9111 pci_disable_msix(tp->pdev);
9112 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
9113 pci_disable_msi(tp->pdev);
9114 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
	tp->tg3_flags3 &= ~(TG3_FLG3_ENABLE_RSS | TG3_FLG3_ENABLE_TSS);
}
static int tg3_open(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, err;
	if (tp->fw_needed) {
		err = tg3_request_firmware(tp);
		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
			if (err)
				return err;
		} else if (err) {
			netdev_warn(tp->dev, "TSO capability disabled\n");
			tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
		} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
			netdev_notice(tp->dev, "TSO capability restored\n");
			tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
		}
	}
9137 netif_carrier_off(tp->dev);
	err = tg3_power_up(tp);
	if (err)
		return err;
9143 tg3_full_lock(tp, 0);
9145 tg3_disable_ints(tp);
9146 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9148 tg3_full_unlock(tp);
	/*
	 * Setup interrupts first so we know how
	 * many NAPI resources to allocate
	 */
	tg3_ints_init(tp);
	/* The placement of this call is tied
	 * to the setup and use of Host TX descriptors.
	 */
	err = tg3_alloc_consistent(tp);
	if (err)
		goto err_out1;
9165 tg3_napi_enable(tp);
9167 for (i = 0; i < tp->irq_cnt; i++) {
9168 struct tg3_napi *tnapi = &tp->napi[i];
9169 err = tg3_request_irq(tp, i);
		if (err) {
			for (i--; i >= 0; i--)
				free_irq(tnapi->irq_vec, tnapi);
			break;
		}
	}

	if (err)
		goto err_out2;
9180 tg3_full_lock(tp, 0);
	err = tg3_init_hw(tp, 1);
	if (err) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_free_rings(tp);
	} else {
		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
			tp->timer_offset = HZ;
		else
			tp->timer_offset = HZ / 10;
9192 BUG_ON(tp->timer_offset > HZ);
9193 tp->timer_counter = tp->timer_multiplier =
9194 (HZ / tp->timer_offset);
9195 tp->asf_counter = tp->asf_multiplier =
9196 ((HZ / tp->timer_offset) * 2);
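		/* With tagged status the timer ticks once per second
		 * (timer_offset = HZ), otherwise ten times per second;
		 * either way timer_counter counts down one second's worth
		 * of ticks and asf_counter two seconds' worth, matching
		 * the 2-second ASF heartbeat described in tg3_timer().
		 */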
9198 init_timer(&tp->timer);
9199 tp->timer.expires = jiffies + tp->timer_offset;
9200 tp->timer.data = (unsigned long) tp;
9201 tp->timer.function = tg3_timer;
	}

	tg3_full_unlock(tp);

	if (err)
		goto err_out3;
9209 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
9210 err = tg3_test_msi(tp);
		if (err) {
			tg3_full_lock(tp, 0);
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			tg3_free_rings(tp);
			tg3_full_unlock(tp);

			goto err_out2;
		}
9221 if (!(tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
9222 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
9223 u32 val = tr32(PCIE_TRANSACTION_CFG);
9225 tw32(PCIE_TRANSACTION_CFG,
			     val | PCIE_TRANS_CFG_1SHOT_MSI);
		}
	}
9232 tg3_full_lock(tp, 0);
9234 add_timer(&tp->timer);
9235 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9236 tg3_enable_ints(tp);
9238 tg3_full_unlock(tp);
	netif_tx_start_all_queues(dev);

	return 0;
err_out3:
	for (i = tp->irq_cnt - 1; i >= 0; i--) {
		struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

err_out2:
	tg3_napi_disable(tp);
	tg3_free_consistent(tp);

err_out1:
	tg3_ints_fini(tp);
	return err;
}
9260 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9261 struct rtnl_link_stats64 *);
9262 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
static int tg3_close(struct net_device *dev)
{
	int i;
	struct tg3 *tp = netdev_priv(dev);
9269 tg3_napi_disable(tp);
9270 cancel_work_sync(&tp->reset_task);
9272 netif_tx_stop_all_queues(dev);
9274 del_timer_sync(&tp->timer);
9278 tg3_full_lock(tp, 1);
9280 tg3_disable_ints(tp);
9282 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9284 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9286 tg3_full_unlock(tp);
9288 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9289 struct tg3_napi *tnapi = &tp->napi[i];
		free_irq(tnapi->irq_vec, tnapi);
	}

	tg3_ints_fini(tp);

	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9297 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9298 sizeof(tp->estats_prev));
9302 tg3_free_consistent(tp);
	netif_carrier_off(tp->dev);

	return 0;
}
static inline u64 get_stat64(tg3_stat64_t *val)
{
	return ((u64)val->high << 32) | ((u64)val->low);
}
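/* On 5700/5701 copper devices the driver maintains the CRC error count
 * itself: the PHY's TEST1 register enables a receive error counter that
 * is read (clear-on-read) from the RXR counter register and accumulated
 * in tp->phy_crc_errors. All other devices report the MAC statistics
 * block's rx_fcs_errors directly.
 */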
static u64 calc_crc_errors(struct tg3 *tp)
{
	struct tg3_hw_stats *hw_stats = tp->hw_stats;
9320 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9321 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9322 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 val;

		spin_lock_bh(&tp->lock);
9326 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9327 tg3_writephy(tp, MII_TG3_TEST1,
9328 val | MII_TG3_TEST1_CRC_EN);
			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
		} else
			val = 0;
		spin_unlock_bh(&tp->lock);

		tp->phy_crc_errors += val;

		return tp->phy_crc_errors;
	}

	return get_stat64(&hw_stats->rx_fcs_errors);
}
9342 #define ESTAT_ADD(member) \
9343 estats->member = old_estats->member + \
9344 get_stat64(&hw_stats->member)
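/* For example, ESTAT_ADD(rx_octets) expands to:
 *   estats->rx_octets = old_estats->rx_octets +
 *			 get_stat64(&hw_stats->rx_octets);
 * i.e. each ethtool counter is the snapshot saved at tg3_close() plus
 * the live hardware statistics block value, keeping totals monotonic
 * across down/up cycles and chip resets.
 */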
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
	struct tg3_ethtool_stats *estats = &tp->estats;
	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_estats;
9355 ESTAT_ADD(rx_octets);
9356 ESTAT_ADD(rx_fragments);
9357 ESTAT_ADD(rx_ucast_packets);
9358 ESTAT_ADD(rx_mcast_packets);
9359 ESTAT_ADD(rx_bcast_packets);
9360 ESTAT_ADD(rx_fcs_errors);
9361 ESTAT_ADD(rx_align_errors);
9362 ESTAT_ADD(rx_xon_pause_rcvd);
9363 ESTAT_ADD(rx_xoff_pause_rcvd);
9364 ESTAT_ADD(rx_mac_ctrl_rcvd);
9365 ESTAT_ADD(rx_xoff_entered);
9366 ESTAT_ADD(rx_frame_too_long_errors);
9367 ESTAT_ADD(rx_jabbers);
9368 ESTAT_ADD(rx_undersize_packets);
9369 ESTAT_ADD(rx_in_length_errors);
9370 ESTAT_ADD(rx_out_length_errors);
9371 ESTAT_ADD(rx_64_or_less_octet_packets);
9372 ESTAT_ADD(rx_65_to_127_octet_packets);
9373 ESTAT_ADD(rx_128_to_255_octet_packets);
9374 ESTAT_ADD(rx_256_to_511_octet_packets);
9375 ESTAT_ADD(rx_512_to_1023_octet_packets);
9376 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9377 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9378 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9379 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9380 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9382 ESTAT_ADD(tx_octets);
9383 ESTAT_ADD(tx_collisions);
9384 ESTAT_ADD(tx_xon_sent);
9385 ESTAT_ADD(tx_xoff_sent);
9386 ESTAT_ADD(tx_flow_control);
9387 ESTAT_ADD(tx_mac_errors);
9388 ESTAT_ADD(tx_single_collisions);
9389 ESTAT_ADD(tx_mult_collisions);
9390 ESTAT_ADD(tx_deferred);
9391 ESTAT_ADD(tx_excessive_collisions);
9392 ESTAT_ADD(tx_late_collisions);
9393 ESTAT_ADD(tx_collide_2times);
9394 ESTAT_ADD(tx_collide_3times);
9395 ESTAT_ADD(tx_collide_4times);
9396 ESTAT_ADD(tx_collide_5times);
9397 ESTAT_ADD(tx_collide_6times);
9398 ESTAT_ADD(tx_collide_7times);
9399 ESTAT_ADD(tx_collide_8times);
9400 ESTAT_ADD(tx_collide_9times);
9401 ESTAT_ADD(tx_collide_10times);
9402 ESTAT_ADD(tx_collide_11times);
9403 ESTAT_ADD(tx_collide_12times);
9404 ESTAT_ADD(tx_collide_13times);
9405 ESTAT_ADD(tx_collide_14times);
9406 ESTAT_ADD(tx_collide_15times);
9407 ESTAT_ADD(tx_ucast_packets);
9408 ESTAT_ADD(tx_mcast_packets);
9409 ESTAT_ADD(tx_bcast_packets);
9410 ESTAT_ADD(tx_carrier_sense_errors);
9411 ESTAT_ADD(tx_discards);
9412 ESTAT_ADD(tx_errors);
9414 ESTAT_ADD(dma_writeq_full);
9415 ESTAT_ADD(dma_write_prioq_full);
9416 ESTAT_ADD(rxbds_empty);
9417 ESTAT_ADD(rx_discards);
9418 ESTAT_ADD(rx_errors);
9419 ESTAT_ADD(rx_threshold_hit);
9421 ESTAT_ADD(dma_readq_full);
9422 ESTAT_ADD(dma_read_prioq_full);
9423 ESTAT_ADD(tx_comp_queue_full);
9425 ESTAT_ADD(ring_set_send_prod_index);
9426 ESTAT_ADD(ring_status_update);
9427 ESTAT_ADD(nic_irqs);
9428 ESTAT_ADD(nic_avoided_irqs);
	ESTAT_ADD(nic_tx_threshold_hit);

	return estats;
}
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct tg3 *tp = netdev_priv(dev);
	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
	struct tg3_hw_stats *hw_stats = tp->hw_stats;

	if (!hw_stats)
		return old_stats;
9444 stats->rx_packets = old_stats->rx_packets +
9445 get_stat64(&hw_stats->rx_ucast_packets) +
9446 get_stat64(&hw_stats->rx_mcast_packets) +
9447 get_stat64(&hw_stats->rx_bcast_packets);
9449 stats->tx_packets = old_stats->tx_packets +
9450 get_stat64(&hw_stats->tx_ucast_packets) +
9451 get_stat64(&hw_stats->tx_mcast_packets) +
9452 get_stat64(&hw_stats->tx_bcast_packets);
9454 stats->rx_bytes = old_stats->rx_bytes +
9455 get_stat64(&hw_stats->rx_octets);
9456 stats->tx_bytes = old_stats->tx_bytes +
9457 get_stat64(&hw_stats->tx_octets);
9459 stats->rx_errors = old_stats->rx_errors +
9460 get_stat64(&hw_stats->rx_errors);
9461 stats->tx_errors = old_stats->tx_errors +
9462 get_stat64(&hw_stats->tx_errors) +
9463 get_stat64(&hw_stats->tx_mac_errors) +
9464 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9465 get_stat64(&hw_stats->tx_discards);
9467 stats->multicast = old_stats->multicast +
9468 get_stat64(&hw_stats->rx_mcast_packets);
9469 stats->collisions = old_stats->collisions +
9470 get_stat64(&hw_stats->tx_collisions);
9472 stats->rx_length_errors = old_stats->rx_length_errors +
9473 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9474 get_stat64(&hw_stats->rx_undersize_packets);
9476 stats->rx_over_errors = old_stats->rx_over_errors +
9477 get_stat64(&hw_stats->rxbds_empty);
9478 stats->rx_frame_errors = old_stats->rx_frame_errors +
9479 get_stat64(&hw_stats->rx_align_errors);
9480 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9481 get_stat64(&hw_stats->tx_discards);
9482 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9483 get_stat64(&hw_stats->tx_carrier_sense_errors);
9485 stats->rx_crc_errors = old_stats->rx_crc_errors +
9486 calc_crc_errors(tp);
9488 stats->rx_missed_errors = old_stats->rx_missed_errors +
9489 get_stat64(&hw_stats->rx_discards);
	stats->rx_dropped = tp->rx_dropped;

	return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg, tmp;
	int j, k;

	reg = 0xffffffff;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			tmp = reg & 0x01;
			reg >>= 1;
			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
{
9522 /* accept or reject all multicast frames */
9523 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9524 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9525 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
}
static void __tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	u32 rx_mode;
9534 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9535 RX_MODE_KEEP_VLAN_TAG);
9537 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
9541 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif
9545 if (dev->flags & IFF_PROMISC) {
9546 /* Promiscuous mode. */
9547 rx_mode |= RX_MODE_PROMISC;
9548 } else if (dev->flags & IFF_ALLMULTI) {
9549 /* Accept all multicast. */
9550 tg3_set_multi(tp, 1);
9551 } else if (netdev_mc_empty(dev)) {
9552 /* Reject all multicast. */
9553 tg3_set_multi(tp, 0);
	} else {
		/* Accept one or more multicast(s). */
9556 struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx, bit, crc;
		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
9570 tw32(MAC_HASH_REG_0, mc_filter[0]);
9571 tw32(MAC_HASH_REG_1, mc_filter[1]);
9572 tw32(MAC_HASH_REG_2, mc_filter[2]);
9573 tw32(MAC_HASH_REG_3, mc_filter[3]);
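		/* The four hash registers form a 128-bit multicast
		 * filter: the top 7 bits of the inverted CRC-32 of each
		 * address select one of 128 bits, with regidx picking
		 * the register and the low 5 bits the bit within it.
		 */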
9576 if (rx_mode != tp->rx_mode) {
9577 tp->rx_mode = rx_mode;
		tw32_f(MAC_RX_MODE, rx_mode);
	}
}
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	if (!netif_running(dev))
		return;
9590 tg3_full_lock(tp, 0);
9591 __tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
9595 #define TG3_REGDUMP_LEN (32 * 1024)
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
static void tg3_get_regs(struct net_device *dev,
			 struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p;
	struct tg3 *tp = netdev_priv(dev);
	u8 *orig_p = _p;
	int i;

	regs->version = 0;

	memset(p, 0, TG3_REGDUMP_LEN);
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return;
9617 tg3_full_lock(tp, 0);
9619 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
#define GET_REG32_LOOP(base, len)		\
do {	p = (u32 *)(orig_p + (base));		\
	for (i = 0; i < len; i += 4)		\
		__GET_REG32((base) + i);	\
} while (0)
#define GET_REG32_1(reg)			\
do {	p = (u32 *)(orig_p + (reg));		\
	__GET_REG32((reg));			\
} while (0)
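/* Note that each block is copied to its own register offset within the
 * dump (p is repositioned to orig_p + base), so the 32k buffer mirrors
 * the register map and unread ranges stay zero from the memset above.
 */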
9630 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9631 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9632 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9633 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9634 GET_REG32_1(SNDDATAC_MODE);
9635 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9636 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9637 GET_REG32_1(SNDBDC_MODE);
9638 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9639 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9640 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9641 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9642 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9643 GET_REG32_1(RCVDCC_MODE);
9644 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9645 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9646 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9647 GET_REG32_1(MBFREE_MODE);
9648 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9649 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9650 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9651 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9652 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9653 GET_REG32_1(RX_CPU_MODE);
9654 GET_REG32_1(RX_CPU_STATE);
9655 GET_REG32_1(RX_CPU_PGMCTR);
9656 GET_REG32_1(RX_CPU_HWBKPT);
9657 GET_REG32_1(TX_CPU_MODE);
9658 GET_REG32_1(TX_CPU_STATE);
9659 GET_REG32_1(TX_CPU_PGMCTR);
9660 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9661 GET_REG32_LOOP(FTQ_RESET, 0x120);
9662 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9663 GET_REG32_1(DMAC_MODE);
9664 GET_REG32_LOOP(GRC_MODE, 0x4c);
9665 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9666 GET_REG32_LOOP(NVRAM_CMD, 0x24);
#undef __GET_REG32
#undef GET_REG32_LOOP
#undef GET_REG32_1
	tg3_full_unlock(tp);
}
static int tg3_get_eeprom_len(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->nvram_size;
}
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 i, offset, len, b_offset, b_count;
	__be32 val;
	u8 *pd;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return -EINVAL;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	offset = eeprom->offset;
	len = eeprom->len;
	eeprom->len = 0;

	eeprom->magic = TG3_EEPROM_MAGIC;
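	/* A request is served in up to three phases: an unaligned head,
	 * whole 4-byte words, and an unaligned tail. E.g. offset=5,
	 * len=10 reads the word at 4 (keeping 3 bytes), the word at 8,
	 * and the word at 12 (keeping 3 bytes).
	 */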
	if (offset & 3) {
		/* adjustments to start on required 4 byte boundary */
		b_offset = offset & 3;
		b_count = 4 - b_offset;
		if (b_count > len) {
			/* i.e. offset=1 len=2 */
			b_count = len;
		}
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
		if (ret)
			return ret;
		memcpy(data, ((char *)&val) + b_offset, b_count);
		len -= b_count;
		offset += b_count;
		eeprom->len += b_count;
	}
	/* read bytes up to the last 4 byte boundary */
9720 pd = &data[eeprom->len];
9721 for (i = 0; i < (len - (len & 3)); i += 4) {
		ret = tg3_nvram_read_be32(tp, offset + i, &val);
		if (ret) {
			eeprom->len += i;
			return ret;
		}
		memcpy(pd + i, &val, 4);
	}
	eeprom->len += i;
	if (len & 3) {
		/* read last bytes not ending on 4 byte boundary */
		pd = &data[eeprom->len];
		b_count = len & 3;
		b_offset = offset + len - b_count;
		ret = tg3_nvram_read_be32(tp, b_offset, &val);
		if (ret)
			return ret;
		memcpy(pd, &val, b_count);
		eeprom->len += b_count;
	}

	return 0;
}
9745 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
	struct tg3 *tp = netdev_priv(dev);
	int ret;
	u32 offset, len, b_offset, odd_len;
	u8 *buf;
	__be32 start, end;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		return -EAGAIN;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    eeprom->magic != TG3_EEPROM_MAGIC)
		return -EINVAL;

	offset = eeprom->offset;
	len = eeprom->len;
9765 if ((b_offset = (offset & 3))) {
9766 /* adjustments to start on required 4 byte boundary */
		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
		if (ret)
			return ret;
		len += b_offset;
		offset &= ~3;
		if (len < 4)
			len = 4;
	}

	odd_len = 0;
	if (len & 3) {
		/* adjustments to end on required 4 byte boundary */
		odd_len = 1;
		len = (len + 3) & ~3;
		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
		if (ret)
			return ret;
	}
	buf = data;
	if (b_offset || odd_len) {
		buf = kmalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		if (b_offset)
			memcpy(buf, &start, 4);
		if (odd_len)
			memcpy(buf+len-4, &end, 4);
		memcpy(buf + b_offset, data, eeprom->len);
	}
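	/* Read-modify-write: the scratch buffer starts and ends on
	 * 4-byte boundaries, so the "start" word preserves the bytes in
	 * front of eeprom->offset and the "end" word the bytes past
	 * offset + len before the whole block is written back.
	 */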
	ret = tg3_nvram_write_block(tp, offset, len, buf);

	if (buf != data)
		kfree(buf);

	return ret;
}
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
9810 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9811 struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_gset(phydev, cmd);
	}
9818 cmd->supported = (SUPPORTED_Autoneg);
9820 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9821 cmd->supported |= (SUPPORTED_1000baseT_Half |
9822 SUPPORTED_1000baseT_Full);
9824 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
9825 cmd->supported |= (SUPPORTED_100baseT_Half |
9826 SUPPORTED_100baseT_Full |
9827 SUPPORTED_10baseT_Half |
				   SUPPORTED_10baseT_Full |
				   SUPPORTED_TP);
		cmd->port = PORT_TP;
	} else {
		cmd->supported |= SUPPORTED_FIBRE;
		cmd->port = PORT_FIBRE;
	}
9836 cmd->advertising = tp->link_config.advertising;
9837 if (netif_running(dev)) {
9838 cmd->speed = tp->link_config.active_speed;
9839 cmd->duplex = tp->link_config.active_duplex;
9841 cmd->speed = SPEED_INVALID;
9842 cmd->duplex = DUPLEX_INVALID;
9844 cmd->phy_address = tp->phy_addr;
9845 cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = tp->link_config.autoneg;

	return 0;
}
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct tg3 *tp = netdev_priv(dev);
9856 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9857 struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_ethtool_sset(phydev, cmd);
	}
9864 if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
9868 if (cmd->autoneg == AUTONEG_DISABLE &&
9869 cmd->duplex != DUPLEX_FULL &&
	    cmd->duplex != DUPLEX_HALF)
		return -EINVAL;
9873 if (cmd->autoneg == AUTONEG_ENABLE) {
9874 u32 mask = ADVERTISED_Autoneg |
			   ADVERTISED_Pause |
			   ADVERTISED_Asym_Pause;
9878 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
9879 mask |= ADVERTISED_1000baseT_Half |
9880 ADVERTISED_1000baseT_Full;
9882 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
9883 mask |= ADVERTISED_100baseT_Half |
9884 ADVERTISED_100baseT_Full |
9885 ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_TP;
		else
			mask |= ADVERTISED_FIBRE;
		if (cmd->advertising & ~mask)
			return -EINVAL;
9894 mask &= (ADVERTISED_1000baseT_Half |
9895 ADVERTISED_1000baseT_Full |
9896 ADVERTISED_100baseT_Half |
9897 ADVERTISED_100baseT_Full |
9898 ADVERTISED_10baseT_Half |
9899 ADVERTISED_10baseT_Full);
9901 cmd->advertising &= mask;
	} else {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
			if (cmd->speed != SPEED_1000)
				return -EINVAL;

			if (cmd->duplex != DUPLEX_FULL)
				return -EINVAL;
		} else {
			if (cmd->speed != SPEED_100 &&
			    cmd->speed != SPEED_10)
				return -EINVAL;
		}
	}
9916 tg3_full_lock(tp, 0);
9918 tp->link_config.autoneg = cmd->autoneg;
9919 if (cmd->autoneg == AUTONEG_ENABLE) {
9920 tp->link_config.advertising = (cmd->advertising |
9921 ADVERTISED_Autoneg);
9922 tp->link_config.speed = SPEED_INVALID;
9923 tp->link_config.duplex = DUPLEX_INVALID;
9925 tp->link_config.advertising = 0;
9926 tp->link_config.speed = cmd->speed;
9927 tp->link_config.duplex = cmd->duplex;
9930 tp->link_config.orig_speed = tp->link_config.speed;
9931 tp->link_config.orig_duplex = tp->link_config.duplex;
9932 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9934 if (netif_running(dev))
9935 tg3_setup_phy(tp, 1);
	tg3_full_unlock(tp);

	return 0;
}
static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tg3 *tp = netdev_priv(dev);
9946 strcpy(info->driver, DRV_MODULE_NAME);
9947 strcpy(info->version, DRV_MODULE_VERSION);
9948 strcpy(info->fw_version, tp->fw_ver);
	strcpy(info->bus_info, pci_name(tp->pdev));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
9956 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9957 device_can_wakeup(&tp->pdev->dev))
		wol->supported = WAKE_MAGIC;
	else
		wol->supported = 0;
	wol->wolopts = 0;
9962 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9963 device_can_wakeup(&tp->pdev->dev))
9964 wol->wolopts = WAKE_MAGIC;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct tg3 *tp = netdev_priv(dev);
	struct device *dp = &tp->pdev->dev;
	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;
9975 if ((wol->wolopts & WAKE_MAGIC) &&
	    !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
		return -EINVAL;
9979 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
9981 spin_lock_bh(&tp->lock);
9982 if (device_may_wakeup(dp))
9983 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
	else
		tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
	spin_unlock_bh(&tp->lock);

	return 0;
}
static u32 tg3_get_msglevel(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return tp->msg_enable;
}
static void tg3_set_msglevel(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	tp->msg_enable = value;
}
static int tg3_set_tso(struct net_device *dev, u32 value)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
		if (value)
			return -EINVAL;
		return 0;
	}
10013 if ((dev->features & NETIF_F_IPV6_CSUM) &&
10014 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
10015 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
		if (value) {
			dev->features |= NETIF_F_TSO6;
10018 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
10019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10020 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
10021 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
10022 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
10023 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
				dev->features |= NETIF_F_TSO_ECN;
		} else
			dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
	}

	return ethtool_op_set_tso(dev, value);
}
static int tg3_nway_reset(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);
	int r;
	if (!netif_running(dev))
		return -EAGAIN;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		return -EINVAL;
10042 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
	} else {
		u32 bmcr;

		spin_lock_bh(&tp->lock);
		r = -EINVAL;
		tg3_readphy(tp, MII_BMCR, &bmcr);
10052 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10053 ((bmcr & BMCR_ANENABLE) ||
10054 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
						   BMCR_ANENABLE);
			r = 0;
		}
		spin_unlock_bh(&tp->lock);
	}

	return r;
}
static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
10069 ering->rx_max_pending = tp->rx_std_ring_mask;
10070 ering->rx_mini_max_pending = 0;
10071 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10072 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
	else
		ering->rx_jumbo_max_pending = 0;
10076 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10078 ering->rx_pending = tp->rx_pending;
10079 ering->rx_mini_pending = 0;
10080 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
10081 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
	else
		ering->rx_jumbo_pending = 0;
	ering->tx_pending = tp->napi[0].tx_pending;
}
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int i, irq_sync = 0, err = 0;
10093 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10094 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10095 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10096 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10097 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
		return -EINVAL;
	if (netif_running(dev)) {
		tg3_phy_stop(tp);
		tg3_netif_stop(tp);
		irq_sync = 1;
	}
10107 tg3_full_lock(tp, irq_sync);
10109 tp->rx_pending = ering->rx_pending;
10111 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
10112 tp->rx_pending > 63)
10113 tp->rx_pending = 63;
10114 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10116 for (i = 0; i < tp->irq_max; i++)
10117 tp->napi[i].tx_pending = ering->tx_pending;
10119 if (netif_running(dev)) {
10120 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10121 err = tg3_restart_hw(tp, 1);
10123 tg3_netif_start(tp);
10126 tg3_full_unlock(tp);
	if (irq_sync && !err)
		tg3_phy_start(tp);

	return err;
}
static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
10138 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
10140 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10141 epause->rx_pause = 1;
	else
		epause->rx_pause = 0;
10145 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10146 epause->tx_pause = 1;
	else
		epause->tx_pause = 0;
}
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;
10156 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
		u32 newadv;
		struct phy_device *phydev;
10160 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10162 if (!(phydev->supported & SUPPORTED_Pause) ||
10163 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;
10167 tp->link_config.flowctrl = 0;
10168 if (epause->rx_pause) {
10169 tp->link_config.flowctrl |= FLOW_CTRL_RX;
			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
10177 } else if (epause->tx_pause) {
10178 tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;
10183 if (epause->autoneg)
10184 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10186 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10188 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10189 u32 oldadv = phydev->advertising &
10190 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10191 if (oldadv != newadv) {
10192 phydev->advertising &=
10193 ~(ADVERTISED_Pause |
10194 ADVERTISED_Asym_Pause);
10195 phydev->advertising |= newadv;
10196 if (phydev->autoneg) {
10198 * Always renegotiate the link to
10199 * inform our link partner of our
10200 * flow control settings, even if the
10201 * flow control is forced. Let
10202 * tg3_adjust_link() do the final
10203 * flow control setup.
10205 return phy_start_aneg(phydev);
10209 if (!epause->autoneg)
10210 tg3_setup_flow_control(tp, 0, 0);
10212 tp->link_config.orig_advertising &=
10213 ~(ADVERTISED_Pause |
10214 ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}
10225 tg3_full_lock(tp, irq_sync);
10227 if (epause->autoneg)
10228 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
10230 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
10231 if (epause->rx_pause)
10232 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10234 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10235 if (epause->tx_pause)
10236 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10238 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10240 if (netif_running(dev)) {
10241 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}
		tg3_full_unlock(tp);
	}

	return err;
}
static u32 tg3_get_rx_csum(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
}
static int tg3_set_rx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	spin_lock_bh(&tp->lock);
	if (data)
		tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
	else
		tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
	spin_unlock_bh(&tp->lock);

	return 0;
}
static int tg3_set_tx_csum(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
		if (data != 0)
			return -EINVAL;
		return 0;
	}

	if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
		ethtool_op_set_tx_ipv6_csum(dev, data);
	else
		ethtool_op_set_tx_csum(dev, data);

	return 0;
}
static int tg3_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return TG3_NUM_TEST;
	case ETH_SS_STATS:
		return TG3_NUM_STATS;
	default:
		return -EOPNOTSUPP;
	}
}
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	case ETH_SS_TEST:
		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
		break;
	default:
		WARN_ON(1);	/* we need a WARN() */
		break;
	}
}
static int tg3_phys_id(struct net_device *dev, u32 data)
{
	struct tg3 *tp = netdev_priv(dev);
	int i;

	if (!netif_running(tp->dev))
		return -EAGAIN;

	if (data == 0)
		data = UINT_MAX / 2;
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10338 LED_CTRL_1000MBPS_ON |
10339 LED_CTRL_100MBPS_ON |
10340 LED_CTRL_10MBPS_ON |
10341 LED_CTRL_TRAFFIC_OVERRIDE |
10342 LED_CTRL_TRAFFIC_BLINK |
10343 LED_CTRL_TRAFFIC_LED);
		else
			tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10347 LED_CTRL_TRAFFIC_OVERRIDE);
		if (msleep_interruptible(500))
			break;
	}
	tw32(MAC_LED_CTRL, tp->led_ctrl);
	return 0;
}
static void tg3_get_ethtool_stats(struct net_device *dev,
				  struct ethtool_stats *estats, u64 *tmp_stats)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
}
10363 #define NVRAM_TEST_SIZE 0x100
10364 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10365 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10366 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10367 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10368 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;
	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;
10382 if (magic == TG3_EEPROM_MAGIC)
10383 size = NVRAM_TEST_SIZE;
10384 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10385 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10386 TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
10402 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;
	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
10412 for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;
10420 /* Selfboot format */
10421 magic = be32_to_cpu(buf[0]);
10422 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10423 TG3_EEPROM_MAGIC_FW) {
10424 u8 *buf8 = (u8 *) buf, csum8 = 0;
10426 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10427 TG3_EEPROM_SB_REVISION_2) {
10428 /* For rev 2, the csum doesn't include the MBA. */
				for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
					csum8 += buf8[i];
				for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
					csum8 += buf8[i];
			} else {
				for (i = 0; i < size; i++)
					csum8 += buf8[i];
			}
10447 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10448 TG3_EEPROM_MAGIC_HW) {
10449 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10450 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10451 u8 *buf8 = (u8 *) buf;
10453 /* Separate the parity bits and the data bytes. */
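		/* In this HW selfboot layout, bytes 0 and 8 each hold 7
		 * parity bits (masks 0x80..0x02) and byte 16 holds 6
		 * (masks 0x20..0x01); every other byte is data. The
		 * check further down then requires each data byte plus
		 * its stored parity bit to have odd overall parity.
		 */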
10454 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10455 if ((i == 0) || (i == 8)) {
10459 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10460 parity[k++] = buf8[i] & msk;
10462 } else if (i == 16) {
10466 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10467 parity[k++] = buf8[i] & msk;
10470 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10471 parity[k++] = buf8[i] & msk;
10474 data[j++] = buf8[i];
10478 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10479 u8 hw8 = hweight8(data[i]);
			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
10490 /* Bootstrap checksum at offset 0x10 */
10491 csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != be32_to_cpu(buf[0x10/4]))
		goto out;
10495 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10496 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != be32_to_cpu(buf[0xfc/4]))
		goto out;

	err = 0;

out:
	kfree(buf);
	return err;
}
10507 #define TG3_SERDES_TIMEOUT_SEC 2
10508 #define TG3_COPPER_TIMEOUT_SEC 6
static int tg3_test_link(struct tg3 *tp)
{
	int i, max;

	if (!netif_running(tp->dev))
		return -ENODEV;
10517 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10518 max = TG3_SERDES_TIMEOUT_SEC;
10520 max = TG3_COPPER_TIMEOUT_SEC;
10522 for (i = 0; i < max; i++) {
		if (netif_carrier_ok(tp->dev))
			return 0;

		if (msleep_interruptible(1000))
			break;
	}

	return -EIO;
}
10533 /* Only test the commonly used registers */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	static struct {
		u16 offset;
		u16 flags;
10541 #define TG3_FL_5705 0x1
10542 #define TG3_FL_NOT_5705 0x2
10543 #define TG3_FL_NOT_5788 0x4
#define TG3_FL_NOT_5750		0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
10548 /* MAC Control Registers */
10549 { MAC_MODE, TG3_FL_NOT_5705,
10550 0x00000000, 0x00ef6f8c },
10551 { MAC_MODE, TG3_FL_5705,
10552 0x00000000, 0x01ef6b8c },
10553 { MAC_STATUS, TG3_FL_NOT_5705,
10554 0x03800107, 0x00000000 },
10555 { MAC_STATUS, TG3_FL_5705,
10556 0x03800100, 0x00000000 },
10557 { MAC_ADDR_0_HIGH, 0x0000,
10558 0x00000000, 0x0000ffff },
10559 { MAC_ADDR_0_LOW, 0x0000,
10560 0x00000000, 0xffffffff },
10561 { MAC_RX_MTU_SIZE, 0x0000,
10562 0x00000000, 0x0000ffff },
10563 { MAC_TX_MODE, 0x0000,
10564 0x00000000, 0x00000070 },
10565 { MAC_TX_LENGTHS, 0x0000,
10566 0x00000000, 0x00003fff },
10567 { MAC_RX_MODE, TG3_FL_NOT_5705,
10568 0x00000000, 0x000007fc },
10569 { MAC_RX_MODE, TG3_FL_5705,
10570 0x00000000, 0x000007dc },
10571 { MAC_HASH_REG_0, 0x0000,
10572 0x00000000, 0xffffffff },
10573 { MAC_HASH_REG_1, 0x0000,
10574 0x00000000, 0xffffffff },
10575 { MAC_HASH_REG_2, 0x0000,
10576 0x00000000, 0xffffffff },
10577 { MAC_HASH_REG_3, 0x0000,
10578 0x00000000, 0xffffffff },
10580 /* Receive Data and Receive BD Initiator Control Registers. */
10581 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10582 0x00000000, 0xffffffff },
10583 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10584 0x00000000, 0xffffffff },
10585 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10586 0x00000000, 0x00000003 },
10587 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10588 0x00000000, 0xffffffff },
10589 { RCVDBDI_STD_BD+0, 0x0000,
10590 0x00000000, 0xffffffff },
10591 { RCVDBDI_STD_BD+4, 0x0000,
10592 0x00000000, 0xffffffff },
10593 { RCVDBDI_STD_BD+8, 0x0000,
10594 0x00000000, 0xffff0002 },
10595 { RCVDBDI_STD_BD+0xc, 0x0000,
10596 0x00000000, 0xffffffff },
10598 /* Receive BD Initiator Control Registers. */
10599 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10600 0x00000000, 0xffffffff },
10601 { RCVBDI_STD_THRESH, TG3_FL_5705,
10602 0x00000000, 0x000003ff },
10603 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10604 0x00000000, 0xffffffff },
10606 /* Host Coalescing Control Registers. */
10607 { HOSTCC_MODE, TG3_FL_NOT_5705,
10608 0x00000000, 0x00000004 },
10609 { HOSTCC_MODE, TG3_FL_5705,
10610 0x00000000, 0x000000f6 },
10611 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10612 0x00000000, 0xffffffff },
10613 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10614 0x00000000, 0x000003ff },
10615 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10616 0x00000000, 0xffffffff },
10617 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10618 0x00000000, 0x000003ff },
10619 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10620 0x00000000, 0xffffffff },
10621 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10622 0x00000000, 0x000000ff },
10623 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10624 0x00000000, 0xffffffff },
10625 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10626 0x00000000, 0x000000ff },
10627 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10628 0x00000000, 0xffffffff },
10629 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10630 0x00000000, 0xffffffff },
10631 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10632 0x00000000, 0xffffffff },
10633 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10634 0x00000000, 0x000000ff },
10635 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10636 0x00000000, 0xffffffff },
10637 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10638 0x00000000, 0x000000ff },
10639 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10640 0x00000000, 0xffffffff },
10641 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10642 0x00000000, 0xffffffff },
10643 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10644 0x00000000, 0xffffffff },
10645 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10646 0x00000000, 0xffffffff },
10647 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10648 0x00000000, 0xffffffff },
10649 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10650 0xffffffff, 0x00000000 },
10651 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10652 0xffffffff, 0x00000000 },
10654 /* Buffer Manager Control Registers. */
10655 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10656 0x00000000, 0x007fff80 },
10657 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10658 0x00000000, 0x007fffff },
10659 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10660 0x00000000, 0x0000003f },
10661 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10662 0x00000000, 0x000001ff },
10663 { BUFMGR_MB_HIGH_WATER, 0x0000,
10664 0x00000000, 0x000001ff },
10665 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10666 0xffffffff, 0x00000000 },
10667 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10668 0xffffffff, 0x00000000 },
10670 /* Mailbox Registers */
10671 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10672 0x00000000, 0x000001ff },
10673 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10674 0x00000000, 0x000001ff },
10675 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10676 0x00000000, 0x000007ff },
10677 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10678 0x00000000, 0x000001ff },
10680 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10683 is_5705 = is_5750 = 0;
	if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
		is_5705 = 1;
		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			is_5750 = 1;
	}
10690 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;
10704 offset = (u32) reg_tbl[i].offset;
10705 read_mask = reg_tbl[i].read_mask;
10706 write_mask = reg_tbl[i].write_mask;
10708 /* Save the original register content */
10709 save_val = tr32(offset);
10711 /* Determine the read-only value. */
10712 read_val = save_val & read_mask;
		/* Write zero to the register, then make sure the read-only
		 * bits are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);
10721 /* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;
10725 /* Write ones to all the bits defined by RdMask and WrMask, then
10726 * make sure the read-only bits are not changed and the
10727 * read/write bits are all ones.
10729 tw32(offset, read_mask | write_mask);
10731 val = tr32(offset);
10733 /* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;
10737 /* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;
		tw32(offset, save_val);
	}

	return 0;

out:
10747 if (netif_msg_hw(tp))
10748 netdev_err(tp->dev,
10749 "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
	int i;
	u32 j;
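	/* All-zeros, all-ones, and a mixed 0xaa55a55a word: a minimal
	 * pattern set, presumably chosen to catch stuck-at bits and
	 * coupling between neighboring lines in the on-chip memory.
	 */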
10760 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10761 for (j = 0; j < len; j += 4) {
			u32 val;

			tg3_write_mem(tp, offset + j, test_pattern[i]);
			tg3_read_mem(tp, offset + j, &val);
			if (val != test_pattern[i])
				return -EIO;
		}
	}
	return 0;
}
10773 static int tg3_test_memory(struct tg3 *tp)
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
10779 { 0x00000000, 0x00b50},
10780 { 0x00002000, 0x1c000},
10781 { 0xffffffff, 0x00000}
10782 }, mem_tbl_5705[] = {
10783 { 0x00000100, 0x0000c},
10784 { 0x00000200, 0x00008},
10785 { 0x00004000, 0x00800},
10786 { 0x00006000, 0x01000},
10787 { 0x00008000, 0x02000},
10788 { 0x00010000, 0x0e000},
10789 { 0xffffffff, 0x00000}
10790 }, mem_tbl_5755[] = {
10791 { 0x00000200, 0x00008},
10792 { 0x00004000, 0x00800},
10793 { 0x00006000, 0x00800},
10794 { 0x00008000, 0x02000},
10795 { 0x00010000, 0x0c000},
10796 { 0xffffffff, 0x00000}
10797 }, mem_tbl_5906[] = {
10798 { 0x00000200, 0x00008},
10799 { 0x00004000, 0x00400},
10800 { 0x00006000, 0x00400},
10801 { 0x00008000, 0x01000},
10802 { 0x00010000, 0x01000},
10803 { 0xffffffff, 0x00000}
10804 }, mem_tbl_5717[] = {
10805 { 0x00000200, 0x00008},
10806 { 0x00010000, 0x0a000},
10807 { 0x00020000, 0x13c00},
10808 { 0xffffffff, 0x00000}
10809 }, mem_tbl_57765[] = {
10810 { 0x00000200, 0x00008},
10811 { 0x00004000, 0x00800},
10812 { 0x00006000, 0x09800},
10813 { 0x00010000, 0x0a000},
10814 { 0xffffffff, 0x00000}
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;
10820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10821 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10822 mem_tbl = mem_tbl_5717;
10823 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10824 mem_tbl = mem_tbl_57765;
10825 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10826 mem_tbl = mem_tbl_5755;
10827 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10828 mem_tbl = mem_tbl_5906;
10829 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10830 mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;
10834 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
10843 #define TG3_MAC_LOOPBACK 0
10844 #define TG3_PHY_LOOPBACK 1
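/* MAC loopback wraps frames inside the MAC (MAC_MODE_PORT_INT_LPBACK)
 * without touching the PHY; PHY loopback sets BMCR_LOOPBACK so frames
 * traverse the MII/GMII path and are returned by the PHY itself.
 */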
static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10849 u32 desc_idx, coal_now;
10850 struct sk_buff *skb, *rx_skb;
10853 int num_pkts, tx_len, rx_len, i, err;
10854 struct tg3_rx_buffer_desc *desc;
10855 struct tg3_napi *tnapi, *rnapi;
10856 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
10858 tnapi = &tp->napi[0];
10859 rnapi = &tp->napi[0];
10860 if (tp->irq_cnt > 1) {
10861 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
10862 rnapi = &tp->napi[1];
10863 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10864 tnapi = &tp->napi[1];
10866 coal_now = tnapi->coal_now | rnapi->coal_now;
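	/* With RSS enabled, loopback frames come back on the first rx
	 * ring vector (napi[1]) while tx completions stay on napi[0]
	 * unless TSS moves them too, so the status blocks polled below
	 * are chosen accordingly.
	 */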
10868 if (loopback_mode == TG3_MAC_LOOPBACK) {
10869 /* HW errata - mac loopback fails in some cases on 5780.
10870 * Normal traffic and PHY loopback are not affected by
10871 * errata. Also, the MAC loopback test is deprecated for
10872 * all newer ASIC revisions.
10874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT))
			return 0;
10878 mac_mode = tp->mac_mode &
10879 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10880 mac_mode |= MAC_MODE_PORT_INT_LPBACK;
10881 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10882 mac_mode |= MAC_MODE_LINK_POLARITY;
10883 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
10884 mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
10887 tw32(MAC_MODE, mac_mode);
10888 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
		u32 val;

		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10892 tg3_phy_fet_toggle_apd(tp, false);
10893 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10897 tg3_phy_toggle_automdix(tp, 0);
10899 tg3_writephy(tp, MII_BMCR, val);
10902 mac_mode = tp->mac_mode &
10903 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
10904 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
10905 tg3_writephy(tp, MII_TG3_FET_PTEST,
10906 MII_TG3_FET_PTEST_FRC_TX_LINK |
10907 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10908 /* The write needs to be flushed for the AC131 */
10909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10910 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10911 mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
10915 /* reset to prevent losing 1st rx packet intermittently */
10916 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10917 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
10921 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10922 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10923 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10924 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10925 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10926 mac_mode |= MAC_MODE_LINK_POLARITY;
10927 tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
10930 tw32(MAC_MODE, mac_mode);
10932 /* Wait for link */
10933 for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	} else {
		return -EINVAL;
	}
	tx_len = 1514;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;
10949 tx_data = skb_put(skb, tx_len);
10950 memcpy(tx_data, tp->dev->dev_addr, 6);
10951 memset(tx_data + 6, 0x0, 8);
10953 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10955 for (i = 14; i < tx_len; i++)
10956 tx_data[i] = (u8) (i & 0xff);
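	/* Test frame: our own MAC as destination, 8 zero bytes, then a
	 * ramp of (i & 0xff) from offset 14; the same ramp is verified
	 * byte-for-byte in the received buffer after the loopback.
	 */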
10958 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10959 if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);
10969 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
	num_pkts = 0;

	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);

	tnapi->tx_prod++;
	num_pkts++;
10978 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10979 tr32_mailbox(tnapi->prodmbox);
	/* 350 usec to allow enough time on some 10/100 Mbps devices. */
	err = -EIO;
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);
10990 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10991 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10992 if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}
10997 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10998 dev_kfree_skb(skb);
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;
11006 desc = &rnapi->rx_rcb[rx_start_idx];
11007 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11008 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
	if (opaque_key != RXD_OPAQUE_RING_STD)
		goto out;
11012 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
	    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
		goto out;
11016 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
	if (rx_len != tx_len)
		goto out;
11020 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
11022 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
11023 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
	for (i = 14; i < tx_len; i++) {
		if (*(rx_skb->data + i) != (u8) (i & 0xff))
			goto out;
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */

out:
	return err;
}
11036 #define TG3_MAC_LOOPBACK_FAILED 1
11037 #define TG3_PHY_LOOPBACK_FAILED 2
11038 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
11039 TG3_PHY_LOOPBACK_FAILED)
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;
11046 if (!netif_running(tp->dev))
11047 return TG3_LOOPBACK_FAILED;
11049 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11050 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}
11058 /* Turn off gphy autopowerdown. */
11059 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11060 tg3_phy_toggle_apd(tp, false);
11062 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
		int i;
		u32 status;

		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
11068 /* Wait for up to 40 microseconds to acquire lock. */
11069 for (i = 0; i < 4; i++) {
11070 status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}
11076 if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}
11081 /* Turn off link-based power management. */
11082 cpmuctrl = tr32(TG3_CPMU_CTRL);
11083 tw32(TG3_CPMU_CTRL,
11084 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
11085 CPMU_CTRL_LINK_AWARE_MODE));
11088 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
11089 err |= TG3_MAC_LOOPBACK_FAILED;
11091 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
11092 tw32(TG3_CPMU_CTRL, cpmuctrl);
11094 /* Release the mutex */
11095 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
11098 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11099 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
11100 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
11101 err |= TG3_PHY_LOOPBACK_FAILED;
11104 /* Re-enable gphy autopowerdown. */
11105 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11106 tg3_phy_toggle_apd(tp, true);
done:
	tp->phy_flags |= eee_cap;

	return err;
}
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_up(tp);
11122 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
11132 if (etest->flags & ETH_TEST_FL_OFFLINE) {
11133 int err, err2 = 0, irq_sync = 0;
11135 if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}
11141 tg3_full_lock(tp, irq_sync);
11143 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
11144 err = tg3_nvram_lock(tp);
11145 tg3_halt_cpu(tp, RX_CPU_BASE);
11146 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11147 tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);
		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
11162 if ((data[4] = tg3_test_loopback(tp)) != 0)
11163 etest->flags |= ETH_TEST_FL_FAILED;
11165 tg3_full_unlock(tp);
		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}
11172 tg3_full_lock(tp, 0);
11174 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11175 if (netif_running(dev)) {
11176 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}
11182 tg3_full_unlock(tp);
		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);
}
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;
11198 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
11199 struct phy_device *phydev;
11200 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11202 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11203 return phy_mii_ioctl(phydev, ifr, cmd);
	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;
11214 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11215 break; /* We have no PHY */
11217 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11218 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !netif_running(dev)))
			return -EAGAIN;
11222 spin_lock_bh(&tp->lock);
11223 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
11224 spin_unlock_bh(&tp->lock);
		if (!err)
			data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
11232 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11233 break; /* We have no PHY */
11235 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11236 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
		    !netif_running(dev)))
			return -EAGAIN;
11240 spin_lock_bh(&tp->lock);
11241 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);
11264 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11265 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11267 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11268 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11269 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11270 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11271 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11274 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11275 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11276 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11277 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11278 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11279 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11280 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11281 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11282 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
		return -EINVAL;
11286 /* No rx interrupts will be generated if both are zero */
11287 if ((ec->rx_coalesce_usecs == 0) &&
	    (ec->rx_max_coalesced_frames == 0))
		return -EINVAL;
11291 /* No tx interrupts will be generated if both are zero */
11292 if ((ec->tx_coalesce_usecs == 0) &&
	    (ec->tx_max_coalesced_frames == 0))
		return -EINVAL;
11296 /* Only copy relevant parameters, ignore all others. */
11297 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11298 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11299 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11300 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11301 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11302 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11303 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11304 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11305 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11307 if (netif_running(dev)) {
11308 tg3_full_lock(tp, 0);
11309 __tg3_set_coalesce(tp, &tp->coal);
11310 tg3_full_unlock(tp);
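/* Note: the pair of handlers above backs the standard ethtool coalescing
 * interface; for example, a userspace invocation such as
 * "ethtool -C ethX rx-usecs 20 rx-frames 5" ends up in tg3_set_coalesce()
 * with those values filled into *ec, and only the fields copied above
 * take effect on this hardware.
 */
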
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.get_rx_csum		= tg3_get_rx_csum,
	.set_rx_csum		= tg3_set_rx_csum,
	.set_tx_csum		= tg3_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= tg3_set_tso,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.phys_id		= tg3_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};

static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
{
	u32 cursize, val, magic;

	tp->nvram_size = EEPROM_CHIP_SIZE;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return;

	if ((magic != TG3_EEPROM_MAGIC) &&
	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
		return;

	/*
	 * Size the chip by reading offsets at increasing powers of two.
	 * When we encounter our validation signature, we know the addressing
	 * has wrapped around, and thus have our chip size.
	 */
	cursize = 0x10;

	while (cursize < tp->nvram_size) {
		if (tg3_nvram_read(tp, cursize, &val) != 0)
			return;

		if (val == magic)
			break;

		cursize <<= 1;
	}

	tp->nvram_size = cursize;
}

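/* Worked example (a sketch, not taken from the original sources): a part
 * that mirrors its contents on address wrap-around and holds the magic
 * signature at offset 0 will echo that signature once the probe address
 * exceeds the device size.  For a 128 KB part the reads at 0x10, 0x20,
 * 0x40, ... all miss until cursize reaches 0x20000, where the aliased
 * signature reappears and cursize is taken as the chip size.
 */
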
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}

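/* Worked example (illustration only): if NVRAM stores the 16-bit size
 * 128 (0x0080, little-endian bytes 0x80 0x00), the read above leaves
 * 0x8000 in the low half of val regardless of host endianness;
 * swab16(0x8000) == 0x0080, so nvram_size becomes 128 * 1024 bytes.
 */
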
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
	} else {
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	}
}

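/* Decode the flash page-size field of NVRAM_CFG1 into a byte count.
 * The 264- and 528-byte sizes correspond to Atmel DataFlash geometries;
 * callers key off those two values to decide whether page-address
 * translation is required for this part.
 */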
static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
{
	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
	case FLASH_5752PAGE_SIZE_256:
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5752PAGE_SIZE_512:
		tp->nvram_pagesize = 512;
		break;
	case FLASH_5752PAGE_SIZE_1K:
		tp->nvram_pagesize = 1024;
		break;
	case FLASH_5752PAGE_SIZE_2K:
		tp->nvram_pagesize = 2048;
		break;
	case FLASH_5752PAGE_SIZE_4K:
		tp->nvram_pagesize = 4096;
		break;
	case FLASH_5752PAGE_SIZE_264:
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752PAGE_SIZE_528:
		tp->nvram_pagesize = 528;
		break;
	}
}

static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		break;
	}

	if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}

static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}
}

static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
}

static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
		tp->tg3_flags2 |= TG3_FLG2_FLASH;

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);

	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tp->tg3_flags |= TG3_FLAG_NVRAM;

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}

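/* Endianness note (illustration only): swab32(be32_to_cpu(x)) is
 * equivalent to le32_to_cpu() of the same four bytes, so the register
 * receives the buffer bytes interpreted little-endian on both LE and BE
 * hosts.  E.g. buffer bytes 0x12 0x34 0x56 0x78 always produce the
 * register value 0x78563412, which is the inverse of the transform the
 * read path applies.
 */
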
/* offset and length are dword aligned */
static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int ret = 0;
	u32 pagesize = tp->nvram_pagesize;
	u32 pagemask = pagesize - 1;
	u32 nvram_cmd;
	u8 *tmp;

	tmp = kmalloc(pagesize, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;

	while (len) {
		int j;
		u32 phy_addr, page_off, size;

		phy_addr = offset & ~pagemask;

		for (j = 0; j < pagesize; j += 4) {
			ret = tg3_nvram_read_be32(tp, phy_addr + j,
						  (__be32 *) (tmp + j));
			if (ret)
				break;
		}
		if (ret)
			break;

		page_off = offset & pagemask;
		size = pagesize;
		if (len < size)
			size = len;

		len -= size;

		memcpy(tmp + page_off, buf, size);

		offset = offset + (pagesize - page_off);

		tg3_enable_nvram_access(tp);

		/*
		 * Before we can erase the flash page, we need
		 * to issue a special "write enable" command.
		 */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Erase the target page */
		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		/* Issue another write enable to start the write. */
		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;

		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
			break;

		for (j = 0; j < pagesize; j += 4) {
			__be32 data;

			data = *((__be32 *) (tmp + j));

			tw32(NVRAM_WRDATA, be32_to_cpu(data));

			tw32(NVRAM_ADDR, phy_addr + j);

			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
				NVRAM_CMD_WR;

			if (j == 0)
				nvram_cmd |= NVRAM_CMD_FIRST;
			else if (j == (pagesize - 4))
				nvram_cmd |= NVRAM_CMD_LAST;

			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
				break;
		}
		if (ret)
			break;
	}

	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
	tg3_nvram_exec_cmd(tp, nvram_cmd);

	kfree(tmp);

	return ret;
}

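/* Summary of the unbuffered write above: since the part cannot be
 * programmed in place, each affected page goes through a classic
 * read-modify-write cycle -- read the whole page into a bounce buffer,
 * overlay the caller's bytes, write-enable, erase the page, write-enable
 * again, then program the page back four bytes at a time with
 * FIRST/LAST framing, and finally write-disable the part.
 */
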
/* offset and length are dword aligned */
static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
		u8 *buf)
{
	int i, ret = 0;

	for (i = 0; i < len; i += 4, offset += 4) {
		u32 page_off, phy_addr, nvram_cmd;
		__be32 data;

		memcpy(&data, buf + i, 4);
		tw32(NVRAM_WRDATA, be32_to_cpu(data));

		page_off = offset % tp->nvram_pagesize;

		phy_addr = tg3_nvram_phys_addr(tp, offset);

		tw32(NVRAM_ADDR, phy_addr);

		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;

		if (page_off == 0 || i == 0)
			nvram_cmd |= NVRAM_CMD_FIRST;
		if (page_off == (tp->nvram_pagesize - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (i == (len - 4))
			nvram_cmd |= NVRAM_CMD_LAST;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
		    (tp->nvram_jedecnum == JEDEC_ST) &&
		    (nvram_cmd & NVRAM_CMD_FIRST)) {

			if ((ret = tg3_nvram_exec_cmd(tp,
				NVRAM_CMD_WREN | NVRAM_CMD_GO |
				NVRAM_CMD_DONE)))
				break;
		}
		if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			/* We always do complete word writes to eeprom. */
			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
		}

		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
			break;
	}
	return ret;
}

/* offset and length are dword aligned */
static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
	int ret;

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
		udelay(40);
	}

	if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
	} else {
		u32 grc_mode;

		ret = tg3_nvram_lock(tp);
		if (ret)
			return ret;

		tg3_enable_nvram_access(tp);
		if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
		    !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
			tw32(NVRAM_WRITE1, 0x406);

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);

		if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
		    !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
			ret = tg3_nvram_write_block_buffered(tp, offset, len,
							     buf);
		} else {
			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
							       buf);
		}

		grc_mode = tr32(GRC_MODE);
		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);
	}

	if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
		udelay(40);
	}

	return ret;
}

struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

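/* Match this board's PCI subsystem vendor/device IDs against the table
 * above; the PHY probe falls back to this lookup when NVRAM carries no
 * usable PHY ID (a phy_id of 0 in the table marks serdes-only boards).
 */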
static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so we need to make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default. */
	tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
		    (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) << 0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
					  SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;
		}

		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
		} else {
			tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
			tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
			if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
				tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
			tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;

		if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
			tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
		     ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
	}
done:
	if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
		device_set_wakeup_enable(&tp->pdev->dev,
					 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

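/* Worked example (illustration only): if the first OTP read returns
 * 0x????AABB (thalf_otp) and the second 0xCCDD???? (bhalf_otp), the
 * merged configuration is 0xAABBCCDD -- the low half of the first word
 * supplies the top 16 bits and the high half of the second word the
 * bottom 16.
 */
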
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
		u32 bmsr, adv_reg, tg3_ctrl, mask;

		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
			   ADVERTISE_100HALF | ADVERTISE_100FULL |
			   ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
		tg3_ctrl = 0;
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
				    MII_TG3_CTRL_ADV_1000_FULL);
			if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
			    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
				tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
					     MII_TG3_CTRL_ENABLE_AS_MASTER);
		}

		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_writephy(tp, MII_ADVERTISE, adv_reg);

			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
				tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
		tg3_phy_set_wirespeed(tp);

		tg3_writephy(tp, MII_ADVERTISE, adv_reg);
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
			tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
		tp->link_config.advertising =
			(ADVERTISED_1000baseT_Half |
			 ADVERTISED_1000baseT_Full |
			 ADVERTISED_Autoneg |
			 ADVERTISED_FIBRE);
	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		tp->link_config.advertising &=
			~(ADVERTISED_1000baseT_Half |
			  ADVERTISED_1000baseT_Full);

	return err;
}

static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	int j, i = 0;
	u32 magic;

	if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
	    tg3_nvram_read(tp, 0x0, &magic))
		goto out_no_vpd;

	vpd_data = kmalloc(TG3_NVM_VPD_LEN, GFP_KERNEL);
	if (!vpd_data)
		goto out_no_vpd;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
			u32 tmp;

			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
				goto out_not_found;

			memcpy(&vpd_data[i], &tmp, sizeof(tmp));
		}
	} else {
		ssize_t cnt;
		unsigned int pos = 0;

		for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   TG3_NVM_VPD_LEN - pos,
					   &vpd_data[pos]);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto out_not_found;
		}
		if (pos != TG3_NVM_VPD_LEN)
			goto out_not_found;
	}

	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > TG3_NVM_VPD_LEN)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > TG3_NVM_VPD_LEN)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;

	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}

static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
{
	u32 val, offset, start;
	int i, vlen;

	for (offset = TG3_NVM_DIR_START;
	     offset < TG3_NVM_DIR_END;
	     offset += TG3_NVM_DIRENT_SIZE) {
		if (tg3_nvram_read(tp, offset, &val))
			return;

		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
			break;
	}

	if (offset == TG3_NVM_DIR_END)
		return;

	if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
		start = 0x08000000;
	else if (tg3_nvram_read(tp, offset - 4, &start))
		return;

	if (tg3_nvram_read(tp, offset + 4, &offset) ||
	    !tg3_fw_img_is_valid(tp, offset) ||
	    tg3_nvram_read(tp, offset + 8, &val))
		return;

	offset += val - start;

	vlen = strlen(tp->fw_ver);

	tp->fw_ver[vlen++] = ',';
	tp->fw_ver[vlen++] = ' ';

	for (i = 0; i < 4; i++) {
		__be32 v;
		if (tg3_nvram_read_be32(tp, offset, &v))
			return;

		offset += sizeof(v);

		if (vlen > TG3_VER_SIZE - sizeof(v)) {
			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
			break;
		}

		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
		vlen += sizeof(v);
	}
}

static void __devinit tg3_read_dash_ver(struct tg3 *tp)
{
	int vlen;
	u32 apedata;
	char *fwtype;

	if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);

	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
		tp->tg3_flags3 |= TG3_FLG3_APE_HAS_NCSI;
		fwtype = "NCSI";
	} else {
		fwtype = "DASH";
	}

	vlen = strlen(tp->fw_ver);

	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
		 fwtype,
		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
		 (apedata & APE_FW_VERSION_BLDMSK));
}

static void __devinit tg3_read_fw_ver(struct tg3 *tp)
{
	u32 val;
	bool vpd_vers = false;

	if (tp->fw_ver[0] != 0)
		vpd_vers = true;

	if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
		strcat(tp->fw_ver, "sb");
		return;
	}

	if (tg3_nvram_read(tp, 0, &val))
		return;

	if (val == TG3_EEPROM_MAGIC)
		tg3_read_bc_ver(tp);
	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
		tg3_read_sb_ver(tp, val);
	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		tg3_read_hwsb_ver(tp);
	else
		return;

	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
	    (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
		goto done;

	tg3_read_mgmtfw_ver(tp);

done:
	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
}

static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);

static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
	dev->vlan_features |= flags;
}

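/* Pick the RX return ring size for this chip: the 5717/5719 class
 * supports the largest return rings, jumbo-capable parts outside the
 * 5780 class get a mid-size ring, and everything else the base size.
 */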
static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
{
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
		return 4096;
	else if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
		 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
		return 1024;
	else
		return 512;
}

static DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};

13088 static int __devinit tg3_get_invariants(struct tg3 *tp)
13091 u32 pci_state_reg, grc_misc_cfg;
13096 /* Force memory write invalidate off. If we leave it on,
13097 * then on 5700_BX chips we have to enable a workaround.
13098 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13099 * to match the cacheline size. The Broadcom driver have this
13100 * workaround but turns MWI off all the times so never uses
13101 * it. This seems to suggest that the workaround is insufficient.
13103 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13104 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13105 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13107 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13108 * has the register indirect write enable bit set before
13109 * we try to access any of the MMIO registers. It is also
13110 * critical that the PCI-X hw workaround situation is decided
13111 * before that as well.
13113 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13116 tp->pci_chip_rev_id = (misc_ctrl_reg >>
13117 MISC_HOST_CTRL_CHIPREV_SHIFT);
13118 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13119 u32 prod_id_asic_rev;
13121 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13122 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13123 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
13124 pci_read_config_dword(tp->pdev,
13125 TG3PCI_GEN2_PRODID_ASICREV,
13126 &prod_id_asic_rev);
13127 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13128 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13129 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13130 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13131 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13132 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13133 pci_read_config_dword(tp->pdev,
13134 TG3PCI_GEN15_PRODID_ASICREV,
13135 &prod_id_asic_rev);
13137 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13138 &prod_id_asic_rev);
13140 tp->pci_chip_rev_id = prod_id_asic_rev;
13143 /* Wrong chip ID in 5752 A0. This code can be removed later
13144 * as A0 is not in production.
13146 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13147 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13149 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13150 * we need to disable memory and use config. cycles
13151 * only to access all registers. The 5702/03 chips
13152 * can mistakenly decode the special cycles from the
13153 * ICH chipsets as memory write cycles, causing corruption
13154 * of register and memory space. Only certain ICH bridges
13155 * will drive special cycles with non-zero data during the
13156 * address phase which can fall within the 5703's address
13157 * range. This is not an ICH bug as the PCI spec allows
13158 * non-zero address during special cycles. However, only
13159 * these ICH bridges are known to drive non-zero addresses
13160 * during special cycles.
13162 * Since special cycles do not cross PCI bridges, we only
13163 * enable this workaround if the 5703 is on the secondary
13164 * bus of these ICH bridges.
13166 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13167 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13168 static struct tg3_dev_id {
13172 } ich_chipsets[] = {
13173 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13175 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13177 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13179 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13183 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13184 struct pci_dev *bridge = NULL;
13186 while (pci_id->vendor != 0) {
13187 bridge = pci_get_device(pci_id->vendor, pci_id->device,
13193 if (pci_id->rev != PCI_ANY_ID) {
13194 if (bridge->revision > pci_id->rev)
13197 if (bridge->subordinate &&
13198 (bridge->subordinate->number ==
13199 tp->pdev->bus->number)) {
13201 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
13202 pci_dev_put(bridge);
13208 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
13209 static struct tg3_dev_id {
13212 } bridge_chipsets[] = {
13213 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13214 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13217 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13218 struct pci_dev *bridge = NULL;
13220 while (pci_id->vendor != 0) {
13221 bridge = pci_get_device(pci_id->vendor,
13228 if (bridge->subordinate &&
13229 (bridge->subordinate->number <=
13230 tp->pdev->bus->number) &&
13231 (bridge->subordinate->subordinate >=
13232 tp->pdev->bus->number)) {
13233 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
13234 pci_dev_put(bridge);
13240 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13241 * DMA addresses > 40-bit. This bridge may have other additional
13242 * 57xx devices behind it in some 4-port NIC designs for example.
13243 * Any tg3 device found behind the bridge will also need the 40-bit
13246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13247 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13248 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13249 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13250 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13252 struct pci_dev *bridge = NULL;
13255 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13256 PCI_DEVICE_ID_SERVERWORKS_EPB,
13258 if (bridge && bridge->subordinate &&
13259 (bridge->subordinate->number <=
13260 tp->pdev->bus->number) &&
13261 (bridge->subordinate->subordinate >=
13262 tp->pdev->bus->number)) {
13263 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13264 pci_dev_put(bridge);
13270 /* Initialize misc host control in PCI block. */
13271 tp->misc_host_ctrl |= (misc_ctrl_reg &
13272 MISC_HOST_CTRL_CHIPREV);
13273 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13274 tp->misc_host_ctrl);
13276 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13277 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13278 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13279 tp->pdev_peer = tg3_find_peer(tp);
13281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13282 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13283 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13284 tp->tg3_flags3 |= TG3_FLG3_5717_PLUS;
13286 /* Intentionally exclude ASIC_REV_5906 */
13287 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13288 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13289 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13290 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13291 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13292 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13293 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13294 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13299 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13300 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13301 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13303 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13304 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13305 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13307 /* 5700 B0 chips do not support checksumming correctly due
13308 * to hardware bugs.
13310 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13311 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13313 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13315 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13316 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13317 features |= NETIF_F_IPV6_CSUM;
13318 tp->dev->features |= features;
13319 vlan_features_add(tp->dev, features);
13322 /* Determine TSO capabilities */
13323 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13324 ; /* Do nothing. HW bug. */
13325 else if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
13326 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13327 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13328 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13329 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13330 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13331 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13332 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13333 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13334 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13335 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13336 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13337 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13338 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13339 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13340 tp->fw_needed = FIRMWARE_TG3TSO5;
13342 tp->fw_needed = FIRMWARE_TG3TSO;
13347 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13348 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13349 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13350 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13351 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13352 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13353 tp->pdev_peer == tp->pdev))
13354 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13356 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13357 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13358 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13361 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13362 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13363 tp->irq_max = TG3_IRQ_MAX_VECS;
13367 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13368 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13369 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13370 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13371 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13372 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13373 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13376 if ((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) &&
13377 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13378 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13380 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13381 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13382 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13383 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13385 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13388 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13389 if (tp->pcie_cap != 0) {
13392 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13394 tp->pcie_readrq = 4096;
13395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13396 tp->pcie_readrq = 2048;
13398 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13400 pci_read_config_word(tp->pdev,
13401 tp->pcie_cap + PCI_EXP_LNKCTL,
13403 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13404 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13405 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13406 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13407 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13408 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13409 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13410 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13411 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13412 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13414 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13415 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13416 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13417 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13418 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13419 if (!tp->pcix_cap) {
13420 dev_err(&tp->pdev->dev,
13421 "Cannot find PCI-X capability, aborting\n");
13425 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13426 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13429 /* If we have an AMD 762 or VIA K8T800 chipset, write
13430 * reordering to the mailbox registers done by the host
13431 * controller can cause major problems. We read back after
13432 * every mailbox register write to force the writes to be
13433 * posted to the chip in order.
13435 if (pci_dev_present(write_reorder_chipsets) &&
13436 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13437 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
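/*
 * Illustrative sketch, not driver code: the flush-style accessors
 * selected further below are assumed to follow this pattern, where a
 * read-back of the same register forces the posted write out of any
 * reordering bridge before execution continues (off and val are
 * placeholders).
 */
#if 0
writel(val, tp->regs + off); /* posted write, may be reordered */
readl(tp->regs + off); /* read-back flushes it in order */
#endif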
13439 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13440 &tp->pci_cacheline_sz);
13441 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13442 &tp->pci_lat_timer);
13443 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13444 tp->pci_lat_timer < 64) {
13445 tp->pci_lat_timer = 64;
13446 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13447 tp->pci_lat_timer);
13450 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13451 /* 5700 BX chips need to have their TX producer index
13452 * mailboxes written twice to work around a bug.
13454 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13456 /* If we are in PCI-X mode, enable register write workaround.
13458 * The workaround is to use indirect register accesses
13459 * for all chip writes not to mailbox registers.
13461 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13464 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13466 /* The chip can have its power management PCI config
13467 * space registers clobbered due to this bug.
13468 * So explicitly force the chip into D0 here.
13470 pci_read_config_dword(tp->pdev,
13471 tp->pm_cap + PCI_PM_CTRL,
13473 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13474 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13475 pci_write_config_dword(tp->pdev,
13476 tp->pm_cap + PCI_PM_CTRL,
13479 /* Also, force SERR#/PERR# in PCI command. */
13480 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13481 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13482 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13486 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13487 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13488 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13489 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13491 /* Chip-specific fixup from Broadcom driver */
13492 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13493 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13494 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13495 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13498 /* Default fast path register access methods */
13499 tp->read32 = tg3_read32;
13500 tp->write32 = tg3_write32;
13501 tp->read32_mbox = tg3_read32;
13502 tp->write32_mbox = tg3_write32;
13503 tp->write32_tx_mbox = tg3_write32;
13504 tp->write32_rx_mbox = tg3_write32;
13506 /* Various workaround register access methods */
13507 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13508 tp->write32 = tg3_write_indirect_reg32;
13509 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13510 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13511 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13513 * Back to back register writes can cause problems on these
13514 * chips, the workaround is to read back all reg writes
13515 * except those to mailbox regs.
13517 * See tg3_write_indirect_reg32().
13519 tp->write32 = tg3_write_flush_reg32;
13522 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13523 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13524 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13525 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13526 tp->write32_rx_mbox = tg3_write_flush_reg32;
13529 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13530 tp->read32 = tg3_read_indirect_reg32;
13531 tp->write32 = tg3_write_indirect_reg32;
13532 tp->read32_mbox = tg3_read_indirect_mbox;
13533 tp->write32_mbox = tg3_write_indirect_mbox;
13534 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13535 tp->write32_rx_mbox = tg3_write_indirect_mbox;
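/*
 * Sketch of the indirect access path, assuming the indirect helpers
 * above funnel the offset and value through two PCI config registers
 * so the access is non-posted and cannot be reordered (off, val and
 * flags are placeholders):
 */
#if 0
spin_lock_irqsave(&tp->indirect_lock, flags);
pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
spin_unlock_irqrestore(&tp->indirect_lock, flags);
#endif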
13540 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13541 pci_cmd &= ~PCI_COMMAND_MEMORY;
13542 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13544 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13545 tp->read32_mbox = tg3_read32_mbox_5906;
13546 tp->write32_mbox = tg3_write32_mbox_5906;
13547 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13548 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13551 if (tp->write32 == tg3_write_indirect_reg32 ||
13552 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13553 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13554 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13555 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
13557 /* Get eeprom hw config before calling tg3_set_power_state().
13558 * In particular, the TG3_FLG2_IS_NIC flag must be
13559 * determined before calling tg3_set_power_state() so that
13560 * we know whether or not to switch out of Vaux power.
13561 * When the flag is set, it means that GPIO1 is used for eeprom
13562 * write protect and also implies that it is a LOM where GPIOs
13563 * are not used to switch power.
13565 tg3_get_eeprom_hw_cfg(tp);
13567 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13568 /* Allow reads and writes to the
13569 * APE register and memory space.
13571 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13572 PCISTATE_ALLOW_APE_SHMEM_WR |
13573 PCISTATE_ALLOW_APE_PSPACE_WR;
13574 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13579 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13580 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13581 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13582 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13583 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13585 /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13586 * GPIO1 driven high will bring 5700's external PHY out of reset.
13587 * It is also used as eeprom write protect on LOMs.
13589 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13590 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13591 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13592 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13593 GRC_LCLCTRL_GPIO_OUTPUT1);
13594 /* Unused GPIO3 must be driven as output on 5752 because there
13595 * are no pull-up resistors on unused GPIO pins.
13597 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13598 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13600 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13601 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13602 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13603 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13605 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13606 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13607 /* Turn off the debug UART. */
13608 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13609 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13610 /* Keep VMain power. */
13611 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13612 GRC_LCLCTRL_GPIO_OUTPUT0;
13615 /* Force the chip into D0. */
13616 err = tg3_power_up(tp);
13618 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13622 /* Derive initial jumbo mode from MTU assigned in
13623 * ether_setup() via the alloc_etherdev() call
13625 if (tp->dev->mtu > ETH_DATA_LEN &&
13626 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13627 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13629 /* Determine WakeOnLan speed to use. */
13630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13631 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13632 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13633 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13634 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13636 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13640 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13642 /* A few boards don't want Ethernet@WireSpeed phy feature */
13643 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13644 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13645 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13646 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13647 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13648 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13649 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13651 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13652 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13653 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13654 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13655 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13657 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13658 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13659 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13660 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13661 !(tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
13662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13663 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13664 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13665 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13666 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13667 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13668 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13669 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13670 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13672 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13676 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13677 tp->phy_otp = tg3_read_otp_phycfg(tp);
13678 if (tp->phy_otp == 0)
13679 tp->phy_otp = TG3_OTP_DEFAULT;
13682 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13683 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13685 tp->mi_mode = MAC_MI_MODE_BASE;
13687 tp->coalesce_mode = 0;
13688 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13689 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13690 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13692 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13693 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13694 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13696 err = tg3_mdio_init(tp);
13700 /* Initialize data/descriptor byte/word swapping. */
13701 val = tr32(GRC_MODE);
13702 val &= GRC_MODE_HOST_STACKUP;
13703 tw32(GRC_MODE, val | tp->grc_mode);
13705 tg3_switch_clocks(tp);
13707 /* Clear this out for sanity. */
13708 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13710 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13712 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13713 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13714 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13716 if (chiprevid == CHIPREV_ID_5701_A0 ||
13717 chiprevid == CHIPREV_ID_5701_B0 ||
13718 chiprevid == CHIPREV_ID_5701_B2 ||
13719 chiprevid == CHIPREV_ID_5701_B5) {
13720 void __iomem *sram_base;
13722 /* Write some dummy words into the SRAM status block
13723 * area and see if they read back correctly. If the return
13724 * value is bad, force enable the PCIX workaround.
13726 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
13728 writel(0x00000000, sram_base);
13729 writel(0x00000000, sram_base + 4);
13730 writel(0xffffffff, sram_base + 4);
13731 if (readl(sram_base) != 0x00000000)
13732 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13737 tg3_nvram_init(tp);
13739 grc_misc_cfg = tr32(GRC_MISC_CFG);
13740 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13743 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13744 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13745 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13747 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13748 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13749 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13750 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13751 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13752 HOSTCC_MODE_CLRTICK_TXBD);
13754 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13755 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13756 tp->misc_host_ctrl);
13759 /* Preserve the APE MAC_MODE bits */
13760 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13761 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13763 tp->mac_mode = TG3_DEF_MAC_MODE;
13765 /* these are limited to 10/100 only */
13766 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13767 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13768 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13769 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13770 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13771 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13772 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13773 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13774 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13775 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13776 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13777 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13778 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13779 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13780 (tp->phy_flags & TG3_PHYFLG_IS_FET))
13781 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
13783 err = tg3_phy_probe(tp);
13785 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13786 /* ... but do not return immediately ... */
13791 tg3_read_fw_ver(tp);
13793 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
13794 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13796 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13797 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13799 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
13802 /* 5700 {AX,BX} chips have a broken status block link
13803 * change bit implementation, so we must use the
13804 * status register in those cases.
13806 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13807 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13809 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13811 /* The led_ctrl is set during tg3_phy_probe; here we might
13812 * have to force the link status polling mechanism based
13813 * upon subsystem IDs.
13815 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13816 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13817 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
13818 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
13819 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13822 /* For all SERDES we poll the MAC status register. */
13823 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13824 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13826 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13828 tp->rx_offset = NET_IP_ALIGN;
13829 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13831 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13833 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13834 tp->rx_copy_thresh = ~(u16)0;
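/* On 5701 in PCI-X mode the usual NET_IP_ALIGN rx offset cannot be
 * used, so packet headers land unaligned; on architectures without
 * efficient unaligned loads, max out the copy threshold so every
 * frame is copied into a properly aligned skb instead.
 */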
13838 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
13839 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
13840 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
13842 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
13844 /* Increment the rx prod index on the rx std ring by at most
13845 * 8 for these chips to work around hw errata.
13847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13849 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13850 tp->rx_std_max_post = 8;
13852 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13853 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13854 PCIE_PWR_MGMT_L1_THRESH_MSK;
13859 #ifdef CONFIG_SPARC
13860 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13862 struct net_device *dev = tp->dev;
13863 struct pci_dev *pdev = tp->pdev;
13864 struct device_node *dp = pci_device_to_OF_node(pdev);
13865 const unsigned char *addr;
13868 addr = of_get_property(dp, "local-mac-address", &len);
13869 if (addr && len == 6) {
13870 memcpy(dev->dev_addr, addr, 6);
13871 memcpy(dev->perm_addr, dev->dev_addr, 6);
13877 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13879 struct net_device *dev = tp->dev;
13881 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13882 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13887 static int __devinit tg3_get_device_address(struct tg3 *tp)
13889 struct net_device *dev = tp->dev;
13890 u32 hi, lo, mac_offset;
13893 #ifdef CONFIG_SPARC
13894 if (!tg3_get_macaddr_sparc(tp))
13899 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13900 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13901 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13903 if (tg3_nvram_lock(tp))
13904 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13906 tg3_nvram_unlock(tp);
13907 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13908 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13909 if (PCI_FUNC(tp->pdev->devfn) & 1)
13911 if (PCI_FUNC(tp->pdev->devfn) > 1)
13912 mac_offset += 0x18c;
13913 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13916 /* First try to get it from MAC address mailbox. */
13917 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
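/* 0x484b is ASCII "HK", which appears to be the bootcode's signature
 * that the mailbox holds a valid MAC address.
 */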
13918 if ((hi >> 16) == 0x484b) {
13919 dev->dev_addr[0] = (hi >> 8) & 0xff;
13920 dev->dev_addr[1] = (hi >> 0) & 0xff;
13922 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13923 dev->dev_addr[2] = (lo >> 24) & 0xff;
13924 dev->dev_addr[3] = (lo >> 16) & 0xff;
13925 dev->dev_addr[4] = (lo >> 8) & 0xff;
13926 dev->dev_addr[5] = (lo >> 0) & 0xff;
13928 /* Some old bootcode may report a 0 MAC address in SRAM */
13929 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13932 /* Next, try NVRAM. */
13933 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13934 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13935 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13936 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13937 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13939 /* Finally just fetch it out of the MAC control regs. */
13941 hi = tr32(MAC_ADDR_0_HIGH);
13942 lo = tr32(MAC_ADDR_0_LOW);
13944 dev->dev_addr[5] = lo & 0xff;
13945 dev->dev_addr[4] = (lo >> 8) & 0xff;
13946 dev->dev_addr[3] = (lo >> 16) & 0xff;
13947 dev->dev_addr[2] = (lo >> 24) & 0xff;
13948 dev->dev_addr[1] = hi & 0xff;
13949 dev->dev_addr[0] = (hi >> 8) & 0xff;
13953 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13954 #ifdef CONFIG_SPARC
13955 if (!tg3_get_default_macaddr_sparc(tp))
13960 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13964 #define BOUNDARY_SINGLE_CACHELINE 1
13965 #define BOUNDARY_MULTI_CACHELINE 2
13967 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13969 int cacheline_size;
13973 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13975 cacheline_size = 1024;
13977 cacheline_size = (int) byte * 4;
13979 /* On 5703 and later chips, the boundary bits have no effect. */
13982 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13983 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13984 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13987 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13988 goal = BOUNDARY_MULTI_CACHELINE;
13990 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13991 goal = BOUNDARY_SINGLE_CACHELINE;
13997 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
13998 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14005 /* PCI controllers on most RISC systems tend to disconnect
14006 * when a device tries to burst across a cache-line boundary.
14007 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14009 * Unfortunately, for PCI-E there are only limited
14010 * write-side controls for this, and thus for reads
14011 * we will still get the disconnects. We'll also waste
14012 * these PCI cycles for both read and write for chips
14013 * other than 5700 and 5701, which do not implement the boundary bits.
14016 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
14017 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
14018 switch (cacheline_size) {
14023 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14024 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14025 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14027 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14028 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14033 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14034 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14038 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14039 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14042 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14043 switch (cacheline_size) {
14047 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14048 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14049 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14055 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14056 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14060 switch (cacheline_size) {
14062 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14063 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14064 DMA_RWCTRL_WRITE_BNDRY_16);
14069 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14070 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14071 DMA_RWCTRL_WRITE_BNDRY_32);
14076 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14077 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14078 DMA_RWCTRL_WRITE_BNDRY_64);
14083 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14084 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14085 DMA_RWCTRL_WRITE_BNDRY_128);
14090 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14091 DMA_RWCTRL_WRITE_BNDRY_256);
14094 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14095 DMA_RWCTRL_WRITE_BNDRY_512);
14099 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14100 DMA_RWCTRL_WRITE_BNDRY_1024);
14109 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
14111 struct tg3_internal_buffer_desc test_desc;
14112 u32 sram_dma_descs;
14115 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
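/* Clear the completion FIFOs and the DMA engine status, and reset the
 * FTQ, before handcrafting a test descriptor in NIC SRAM.
 */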
14117 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
14118 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
14119 tw32(RDMAC_STATUS, 0);
14120 tw32(WDMAC_STATUS, 0);
14122 tw32(BUFMGR_MODE, 0);
14123 tw32(FTQ_RESET, 0);
14125 test_desc.addr_hi = ((u64) buf_dma) >> 32;
14126 test_desc.addr_lo = buf_dma & 0xffffffff;
14127 test_desc.nic_mbuf = 0x00002100;
14128 test_desc.len = size;
14131 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
14132 * the *second* time the tg3 driver was getting loaded after an
14133 * initial scan.
14135 * Broadcom tells me:
14136 * ...the DMA engine is connected to the GRC block and a DMA
14137 * reset may affect the GRC block in some unpredictable way...
14138 * The behavior of resets to individual blocks has not been tested.
14140 * Broadcom noted the GRC reset will also reset all sub-components.
14143 test_desc.cqid_sqid = (13 << 8) | 2;
14145 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
14148 test_desc.cqid_sqid = (16 << 8) | 7;
14150 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
14153 test_desc.flags = 0x00000005;
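/* The NIC cannot see host memory structures directly here, so copy
 * the descriptor into SRAM one 32-bit word at a time through the PCI
 * memory window.
 */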
14155 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
14158 val = *(((u32 *)&test_desc) + i);
14159 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
14160 sram_dma_descs + (i * sizeof(u32)));
14161 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
14163 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
14166 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
14168 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
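/* Poll the appropriate completion FIFO until the descriptor's SRAM
 * address shows up in the low 16 bits, giving up after 40 iterations.
 */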
14171 for (i = 0; i < 40; i++) {
14175 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
14177 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
14178 if ((val & 0xffff) == sram_dma_descs) {
14189 #define TEST_BUFFER_SIZE 0x2000
14191 DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = {
14192 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14196 static int __devinit tg3_test_dma(struct tg3 *tp)
14198 dma_addr_t buf_dma;
14199 u32 *buf, saved_dma_rwctrl;
14202 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14203 &buf_dma, GFP_KERNEL);
14209 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
14210 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
14212 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
14214 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)
14217 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14218 /* DMA read watermark not used on PCIE */
14219 tp->dma_rwctrl |= 0x00180000;
14220 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
14221 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14222 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
14223 tp->dma_rwctrl |= 0x003f0000;
14225 tp->dma_rwctrl |= 0x003f000f;
14227 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
14229 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
14230 u32 read_water = 0x7;
14232 /* If the 5704 is behind the EPB bridge, we can
14233 * do the less restrictive ONE_DMA workaround for
14234 * better performance.
14236 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
14237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14238 tp->dma_rwctrl |= 0x8000;
14239 else if (ccval == 0x6 || ccval == 0x7)
14240 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
14242 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
14244 /* Set bit 23 to enable PCIX hw bug fix */
14246 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
14247 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
14249 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
14250 /* 5780 always in PCIX mode */
14251 tp->dma_rwctrl |= 0x00144000;
14252 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
14253 /* 5714 always in PCIX mode */
14254 tp->dma_rwctrl |= 0x00148000;
14256 tp->dma_rwctrl |= 0x001b000f;
14260 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
14261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
14262 tp->dma_rwctrl &= 0xfffffff0;
14264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14265 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14266 /* Remove this if it causes problems for some boards. */
14267 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
14269 /* On 5700/5701 chips, we need to set this bit.
14270 * Otherwise the chip will issue cacheline transactions
14271 * to streamable DMA memory without all of the byte
14272 * enables turned on. This is an error on several
14273 * RISC PCI controllers, in particular sparc64.
14275 * On 5703/5704 chips, this bit has been reassigned
14276 * a different meaning. In particular, it is used
14277 * on those chips to enable a PCI-X workaround.
14279 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14282 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14285 /* Unneeded, already done by tg3_get_invariants. */
14286 tg3_switch_clocks(tp);
14289 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14290 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14293 /* It is best to perform the DMA test with the maximum write burst
14294 * size in order to expose the 5700/5701 write DMA bug.
14296 saved_dma_rwctrl = tp->dma_rwctrl;
14297 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14298 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14303 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14304 p[i] = i; /* fill the buffer with a known ramp pattern */
14306 /* Send the buffer to the chip. */
14307 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14309 dev_err(&tp->pdev->dev,
14310 "%s: Buffer write failed. err = %d\n",
14316 /* validate data reached card RAM correctly. */
14317 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14319 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14320 if (le32_to_cpu(val) != p[i]) {
14321 dev_err(&tp->pdev->dev,
14322 "%s: Buffer corrupted on device! "
14323 "(%d != %d)\n", __func__, val, i);
14324 /* ret = -ENODEV here? */
14329 /* Now read it back. */
14330 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14332 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14333 "err = %d\n", __func__, ret);
14338 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14342 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14343 DMA_RWCTRL_WRITE_BNDRY_16) {
14344 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14345 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14346 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14349 dev_err(&tp->pdev->dev,
14350 "%s: Buffer corrupted on read back! "
14351 "(%d != %d)\n", __func__, p[i], i);
14357 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14363 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14364 DMA_RWCTRL_WRITE_BNDRY_16) {
14366 /* DMA test passed without adjusting the DMA boundary;
14367 * now look for chipsets that are known to expose the
14368 * DMA bug without failing the test.
14370 if (pci_dev_present(dma_wait_state_chipsets)) {
14371 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14372 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14374 /* Safe to use the calculated DMA boundary. */
14375 tp->dma_rwctrl = saved_dma_rwctrl;
14378 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14382 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14387 static void __devinit tg3_init_link_config(struct tg3 *tp)
14389 tp->link_config.advertising =
14390 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14391 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14392 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14393 ADVERTISED_Autoneg | ADVERTISED_MII);
14394 tp->link_config.speed = SPEED_INVALID;
14395 tp->link_config.duplex = DUPLEX_INVALID;
14396 tp->link_config.autoneg = AUTONEG_ENABLE;
14397 tp->link_config.active_speed = SPEED_INVALID;
14398 tp->link_config.active_duplex = DUPLEX_INVALID;
14399 tp->link_config.orig_speed = SPEED_INVALID;
14400 tp->link_config.orig_duplex = DUPLEX_INVALID;
14401 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14404 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14406 if (tp->tg3_flags3 & TG3_FLG3_5717_PLUS) {
14407 tp->bufmgr_config.mbuf_read_dma_low_water =
14408 DEFAULT_MB_RDMA_LOW_WATER_5705;
14409 tp->bufmgr_config.mbuf_mac_rx_low_water =
14410 DEFAULT_MB_MACRX_LOW_WATER_57765;
14411 tp->bufmgr_config.mbuf_high_water =
14412 DEFAULT_MB_HIGH_WATER_57765;
14414 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14415 DEFAULT_MB_RDMA_LOW_WATER_5705;
14416 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14417 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14418 tp->bufmgr_config.mbuf_high_water_jumbo =
14419 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14420 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14421 tp->bufmgr_config.mbuf_read_dma_low_water =
14422 DEFAULT_MB_RDMA_LOW_WATER_5705;
14423 tp->bufmgr_config.mbuf_mac_rx_low_water =
14424 DEFAULT_MB_MACRX_LOW_WATER_5705;
14425 tp->bufmgr_config.mbuf_high_water =
14426 DEFAULT_MB_HIGH_WATER_5705;
14427 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14428 tp->bufmgr_config.mbuf_mac_rx_low_water =
14429 DEFAULT_MB_MACRX_LOW_WATER_5906;
14430 tp->bufmgr_config.mbuf_high_water =
14431 DEFAULT_MB_HIGH_WATER_5906;
14434 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14435 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14436 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14437 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14438 tp->bufmgr_config.mbuf_high_water_jumbo =
14439 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14441 tp->bufmgr_config.mbuf_read_dma_low_water =
14442 DEFAULT_MB_RDMA_LOW_WATER;
14443 tp->bufmgr_config.mbuf_mac_rx_low_water =
14444 DEFAULT_MB_MACRX_LOW_WATER;
14445 tp->bufmgr_config.mbuf_high_water =
14446 DEFAULT_MB_HIGH_WATER;
14448 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14449 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14450 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14451 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14452 tp->bufmgr_config.mbuf_high_water_jumbo =
14453 DEFAULT_MB_HIGH_WATER_JUMBO;
14456 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14457 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14460 static char * __devinit tg3_phy_string(struct tg3 *tp)
14462 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14463 case TG3_PHY_ID_BCM5400: return "5400";
14464 case TG3_PHY_ID_BCM5401: return "5401";
14465 case TG3_PHY_ID_BCM5411: return "5411";
14466 case TG3_PHY_ID_BCM5701: return "5701";
14467 case TG3_PHY_ID_BCM5703: return "5703";
14468 case TG3_PHY_ID_BCM5704: return "5704";
14469 case TG3_PHY_ID_BCM5705: return "5705";
14470 case TG3_PHY_ID_BCM5750: return "5750";
14471 case TG3_PHY_ID_BCM5752: return "5752";
14472 case TG3_PHY_ID_BCM5714: return "5714";
14473 case TG3_PHY_ID_BCM5780: return "5780";
14474 case TG3_PHY_ID_BCM5755: return "5755";
14475 case TG3_PHY_ID_BCM5787: return "5787";
14476 case TG3_PHY_ID_BCM5784: return "5784";
14477 case TG3_PHY_ID_BCM5756: return "5722/5756";
14478 case TG3_PHY_ID_BCM5906: return "5906";
14479 case TG3_PHY_ID_BCM5761: return "5761";
14480 case TG3_PHY_ID_BCM5718C: return "5718C";
14481 case TG3_PHY_ID_BCM5718S: return "5718S";
14482 case TG3_PHY_ID_BCM57765: return "57765";
14483 case TG3_PHY_ID_BCM5719C: return "5719C";
14484 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14485 case 0: return "serdes";
14486 default: return "unknown";
14490 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14492 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14493 strcpy(str, "PCI Express");
14495 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14496 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14498 strcpy(str, "PCIX:");
14500 if ((clock_ctrl == 7) ||
14501 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14502 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14503 strcat(str, "133MHz");
14504 else if (clock_ctrl == 0)
14505 strcat(str, "33MHz");
14506 else if (clock_ctrl == 2)
14507 strcat(str, "50MHz");
14508 else if (clock_ctrl == 4)
14509 strcat(str, "66MHz");
14510 else if (clock_ctrl == 6)
14511 strcat(str, "100MHz");
14513 strcpy(str, "PCI:");
14514 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14515 strcat(str, "66MHz");
14517 strcat(str, "33MHz");
14519 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14520 strcat(str, ":32-bit");
14522 strcat(str, ":64-bit");
14526 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14528 struct pci_dev *peer;
14529 unsigned int func, devnr = tp->pdev->devfn & ~7;
14531 for (func = 0; func < 8; func++) {
14532 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14533 if (peer && peer != tp->pdev)
14537 /* 5704 can be configured in single-port mode; set peer to tp->pdev in
14538 * that case. No need to keep the refcount elevated: one half of this
14539 * device cannot be removed without removing the other. */
14540 if (!peer)
14541 peer = tp->pdev;
14542 else
14543 pci_dev_put(peer);
14544 return peer;
14554 static void __devinit tg3_init_coal(struct tg3 *tp)
14556 struct ethtool_coalesce *ec = &tp->coal;
14558 memset(ec, 0, sizeof(*ec));
14559 ec->cmd = ETHTOOL_GCOALESCE;
14560 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14561 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14562 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14563 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14564 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14565 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14566 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14567 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14568 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14570 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14571 HOSTCC_MODE_CLRTICK_TXBD)) {
14572 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14573 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14574 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14575 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14578 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14579 ec->rx_coalesce_usecs_irq = 0;
14580 ec->tx_coalesce_usecs_irq = 0;
14581 ec->stats_block_coalesce_usecs = 0;
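/* These defaults are what "ethtool -c <dev>" reports before any user
 * tuning; updates coming in from ethtool are validated elsewhere in
 * the driver against the hardware limits.
 */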
14585 static const struct net_device_ops tg3_netdev_ops = {
14586 .ndo_open = tg3_open,
14587 .ndo_stop = tg3_close,
14588 .ndo_start_xmit = tg3_start_xmit,
14589 .ndo_get_stats64 = tg3_get_stats64,
14590 .ndo_validate_addr = eth_validate_addr,
14591 .ndo_set_multicast_list = tg3_set_rx_mode,
14592 .ndo_set_mac_address = tg3_set_mac_addr,
14593 .ndo_do_ioctl = tg3_ioctl,
14594 .ndo_tx_timeout = tg3_tx_timeout,
14595 .ndo_change_mtu = tg3_change_mtu,
14596 #ifdef CONFIG_NET_POLL_CONTROLLER
14597 .ndo_poll_controller = tg3_poll_controller,
14601 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14602 .ndo_open = tg3_open,
14603 .ndo_stop = tg3_close,
14604 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14605 .ndo_get_stats64 = tg3_get_stats64,
14606 .ndo_validate_addr = eth_validate_addr,
14607 .ndo_set_multicast_list = tg3_set_rx_mode,
14608 .ndo_set_mac_address = tg3_set_mac_addr,
14609 .ndo_do_ioctl = tg3_ioctl,
14610 .ndo_tx_timeout = tg3_tx_timeout,
14611 .ndo_change_mtu = tg3_change_mtu,
14612 #ifdef CONFIG_NET_POLL_CONTROLLER
14613 .ndo_poll_controller = tg3_poll_controller,
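/* The two ops tables differ only in .ndo_start_xmit: the _dma_bug
 * variant routes transmits through extra workarounds for chips whose
 * DMA engines cannot handle certain buffer layouts (see the
 * netdev_ops selection in tg3_init_one() below).
 */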
14617 static int __devinit tg3_init_one(struct pci_dev *pdev,
14618 const struct pci_device_id *ent)
14620 struct net_device *dev;
14622 int i, err, pm_cap;
14623 u32 sndmbx, rcvmbx, intmbx;
14625 u64 dma_mask, persist_dma_mask;
14627 printk_once(KERN_INFO "%s\n", version);
14629 err = pci_enable_device(pdev);
14631 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14635 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14637 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14638 goto err_out_disable_pdev;
14641 pci_set_master(pdev);
14643 /* Find power-management capability. */
14644 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14646 dev_err(&pdev->dev,
14647 "Cannot find Power Management capability, aborting\n");
14649 goto err_out_free_res;
14652 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14654 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14656 goto err_out_free_res;
14659 SET_NETDEV_DEV(dev, &pdev->dev);
14661 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14663 tp = netdev_priv(dev);
14666 tp->pm_cap = pm_cap;
14667 tp->rx_mode = TG3_DEF_RX_MODE;
14668 tp->tx_mode = TG3_DEF_TX_MODE;
14671 tp->msg_enable = tg3_debug;
14673 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14675 /* The word/byte swap controls here control register access byte
14676 * swapping. DMA data byte swapping is controlled in the GRC_MODE
14677 * setting below.
14679 tp->misc_host_ctrl =
14680 MISC_HOST_CTRL_MASK_PCI_INT |
14681 MISC_HOST_CTRL_WORD_SWAP |
14682 MISC_HOST_CTRL_INDIR_ACCESS |
14683 MISC_HOST_CTRL_PCISTATE_RW;
14685 /* The NONFRM (non-frame) byte/word swap controls take effect
14686 * on descriptor entries, i.e. anything which isn't packet data.
14688 * The StrongARM chips on the board (one for tx, one for rx)
14689 * are running in big-endian mode.
14691 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14692 GRC_MODE_WSWAP_NONFRM_DATA);
14693 #ifdef __BIG_ENDIAN
14694 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14696 spin_lock_init(&tp->lock);
14697 spin_lock_init(&tp->indirect_lock);
14698 INIT_WORK(&tp->reset_task, tg3_reset_task);
14700 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14702 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14704 goto err_out_free_dev;
14707 tg3_init_link_config(tp);
14709 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14710 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14712 dev->ethtool_ops = &tg3_ethtool_ops;
14713 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14714 dev->irq = pdev->irq;
14716 err = tg3_get_invariants(tp);
14718 dev_err(&pdev->dev,
14719 "Problem fetching invariants of chip, aborting\n");
14720 goto err_out_iounmap;
14723 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14724 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
14725 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14726 dev->netdev_ops = &tg3_netdev_ops;
14728 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14731 /* The EPB bridge inside 5714, 5715, and 5780 and any
14732 * device behind the EPB cannot support DMA addresses > 40-bit.
14733 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14734 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14735 * do DMA address check in tg3_start_xmit().
14737 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14738 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14739 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14740 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14741 #ifdef CONFIG_HIGHMEM
14742 dma_mask = DMA_BIT_MASK(64);
14745 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14747 /* Configure DMA attributes. */
14748 if (dma_mask > DMA_BIT_MASK(32)) {
14749 err = pci_set_dma_mask(pdev, dma_mask);
14751 dev->features |= NETIF_F_HIGHDMA;
14752 err = pci_set_consistent_dma_mask(pdev,
14755 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14756 "DMA for consistent allocations\n");
14757 goto err_out_iounmap;
14761 if (err || dma_mask == DMA_BIT_MASK(32)) {
14762 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14764 dev_err(&pdev->dev,
14765 "No usable DMA configuration, aborting\n");
14766 goto err_out_iounmap;
14770 tg3_init_bufmgr_config(tp);
14772 /* Selectively allow TSO based on operating conditions */
14773 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14774 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14775 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14777 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14778 tp->fw_needed = NULL;
14781 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14782 tp->fw_needed = FIRMWARE_TG3;
14784 /* TSO is on by default on chips that support hardware TSO.
14785 * Firmware TSO on older chips gives lower performance, so it
14786 * is off by default, but can be enabled using ethtool.
14788 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14789 (dev->features & NETIF_F_IP_CSUM)) {
14790 dev->features |= NETIF_F_TSO;
14791 vlan_features_add(dev, NETIF_F_TSO);
14793 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14794 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14795 if (dev->features & NETIF_F_IPV6_CSUM) {
14796 dev->features |= NETIF_F_TSO6;
14797 vlan_features_add(dev, NETIF_F_TSO6);
14799 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14800 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14801 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14802 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14804 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14805 dev->features |= NETIF_F_TSO_ECN;
14806 vlan_features_add(dev, NETIF_F_TSO_ECN);
14810 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14811 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14812 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14813 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14814 tp->rx_pending = 63;
14817 err = tg3_get_device_address(tp);
14819 dev_err(&pdev->dev,
14820 "Could not obtain valid ethernet address, aborting\n");
14821 goto err_out_iounmap;
14824 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14825 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14826 if (!tp->aperegs) {
14827 dev_err(&pdev->dev,
14828 "Cannot map APE registers, aborting\n");
14830 goto err_out_iounmap;
14833 tg3_ape_lock_init(tp);
14835 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14836 tg3_read_dash_ver(tp);
14840 * Reset chip in case UNDI or EFI driver did not shut down
14841 * DMA. The DMA self test will enable WDMAC and we'll see (spurious)
14842 * pending DMA on the PCI bus at that point.
14844 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14845 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14846 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14847 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14850 err = tg3_test_dma(tp);
14852 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14853 goto err_out_apeunmap;
14856 /* flow control autonegotiation is default behavior */
14857 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14858 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14860 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14861 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14862 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14863 for (i = 0; i < tp->irq_max; i++) {
14864 struct tg3_napi *tnapi = &tp->napi[i];
14867 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14869 tnapi->int_mbox = intmbx;
14875 tnapi->consmbox = rcvmbx;
14876 tnapi->prodmbox = sndmbx;
14879 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14881 tnapi->coal_now = HOSTCC_MODE_NOW;
14883 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14887 * If we support MSIX, we'll be using RSS. If we're using
14888 * RSS, the first vector only handles link interrupts and the
14889 * remaining vectors handle rx and tx interrupts. Reuse the
14890 * mailbox values for the next iteration. The values we set up
14891 * above are still useful for the single vectored mode.
14906 pci_set_drvdata(pdev, dev);
14908 err = register_netdev(dev);
14910 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14911 goto err_out_apeunmap;
14914 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14915 tp->board_part_number,
14916 tp->pci_chip_rev_id,
14917 tg3_bus_string(tp, str),
14920 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
14921 struct phy_device *phydev;
14922 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14924 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14925 phydev->drv->name, dev_name(&phydev->dev));
14929 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
14930 ethtype = "10/100Base-TX";
14931 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
14932 ethtype = "1000Base-SX";
14934 ethtype = "10/100/1000Base-T";
14936 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14937 "(WireSpeed[%d])\n", tg3_phy_string(tp), ethtype,
14938 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0);
14941 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14942 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14943 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14944 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
14945 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14946 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14947 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14949 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14950 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14956 iounmap(tp->aperegs);
14957 tp->aperegs = NULL;
14970 pci_release_regions(pdev);
14972 err_out_disable_pdev:
14973 pci_disable_device(pdev);
14974 pci_set_drvdata(pdev, NULL);
14978 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14980 struct net_device *dev = pci_get_drvdata(pdev);
14983 struct tg3 *tp = netdev_priv(dev);
14986 release_firmware(tp->fw);
14988 cancel_work_sync(&tp->reset_task);
14990 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14995 unregister_netdev(dev);
14997 iounmap(tp->aperegs);
14998 tp->aperegs = NULL;
15005 pci_release_regions(pdev);
15006 pci_disable_device(pdev);
15007 pci_set_drvdata(pdev, NULL);
15011 #ifdef CONFIG_PM_SLEEP
15012 static int tg3_suspend(struct device *device)
15014 struct pci_dev *pdev = to_pci_dev(device);
15015 struct net_device *dev = pci_get_drvdata(pdev);
15016 struct tg3 *tp = netdev_priv(dev);
15019 if (!netif_running(dev))
15022 flush_work_sync(&tp->reset_task);
15024 tg3_netif_stop(tp);
15026 del_timer_sync(&tp->timer);
15028 tg3_full_lock(tp, 1);
15029 tg3_disable_ints(tp);
15030 tg3_full_unlock(tp);
15032 netif_device_detach(dev);
15034 tg3_full_lock(tp, 0);
15035 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15036 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
15037 tg3_full_unlock(tp);
15039 err = tg3_power_down_prepare(tp);
15043 tg3_full_lock(tp, 0);
15045 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15046 err2 = tg3_restart_hw(tp, 1);
15050 tp->timer.expires = jiffies + tp->timer_offset;
15051 add_timer(&tp->timer);
15053 netif_device_attach(dev);
15054 tg3_netif_start(tp);
15057 tg3_full_unlock(tp);
15066 static int tg3_resume(struct device *device)
15068 struct pci_dev *pdev = to_pci_dev(device);
15069 struct net_device *dev = pci_get_drvdata(pdev);
15070 struct tg3 *tp = netdev_priv(dev);
15073 if (!netif_running(dev))
15076 netif_device_attach(dev);
15078 tg3_full_lock(tp, 0);
15080 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
15081 err = tg3_restart_hw(tp, 1);
15085 tp->timer.expires = jiffies + tp->timer_offset;
15086 add_timer(&tp->timer);
15088 tg3_netif_start(tp);
15091 tg3_full_unlock(tp);
15099 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15100 #define TG3_PM_OPS (&tg3_pm_ops)
15104 #define TG3_PM_OPS NULL
15106 #endif /* CONFIG_PM_SLEEP */
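/*
 * For reference, a sketch of roughly what SIMPLE_DEV_PM_OPS() expands
 * to in this kernel era (an assumption based on the generic dev_pm_ops
 * helpers): the same pair of callbacks serves both suspend/resume and
 * the hibernation entry points.
 */
#if 0
static const struct dev_pm_ops tg3_pm_ops = {
.suspend = tg3_suspend,
.resume = tg3_resume,
.freeze = tg3_suspend,
.thaw = tg3_resume,
.poweroff = tg3_suspend,
.restore = tg3_resume,
};
#endif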
15108 static struct pci_driver tg3_driver = {
15109 .name = DRV_MODULE_NAME,
15110 .id_table = tg3_pci_tbl,
15111 .probe = tg3_init_one,
15112 .remove = __devexit_p(tg3_remove_one),
15113 .driver.pm = TG3_PM_OPS,
15116 static int __init tg3_init(void)
15118 return pci_register_driver(&tg3_driver);
15121 static void __exit tg3_cleanup(void)
15123 pci_unregister_driver(&tg3_driver);
15126 module_init(tg3_init);
15127 module_exit(tg3_cleanup);