/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2010 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>

#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/uaccess.h>

#include <asm/idprom.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define TG3_VLAN_TAG_USED 1
#else
#define TG3_VLAN_TAG_USED 0
#endif
#define DRV_MODULE_NAME		"tg3"
#define DRV_MODULE_VERSION	"3.111"
#define DRV_MODULE_RELDATE	"June 5, 2010"

#define TG3_DEF_MAC_MODE	0
#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
76 #define TG3_DEF_MSG_ENABLE \
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
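/* Explanatory note (not from the original source): since HZ is the number of
 * jiffies per second, (5 * HZ) is five seconds regardless of the kernel's HZ
 * configuration.  If the stack sees no TX completions for that long it calls
 * the driver's tx_timeout handler to try to recover the hardware.
 */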
/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_RING_SIZE		512
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JUMBO_RING_SIZE		256
#define TG3_DEF_RX_JUMBO_RING_PENDING	100
#define TG3_RSS_INDIR_TBL_SIZE		128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
#define TG3_RX_RCB_RING_SIZE(tp)	\
	(((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) && \
	  !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) ? 1024 : 512)
#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_RING_BYTES	(sizeof(struct tg3_rx_buffer_desc) * \
				 TG3_RX_RING_SIZE)
#define TG3_RX_JUMBO_RING_BYTES	(sizeof(struct tg3_ext_rx_buffer_desc) * \
				 TG3_RX_JUMBO_RING_SIZE)
#define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
				   TG3_RX_RCB_RING_SIZE(tp))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
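/* Illustrative note (not from the original source): because the ring sizes
 * above are compile-time powers of two, a modulo reduces to a single mask,
 * which is what NEXT_TX() spells out by hand, e.g.
 *
 *	next = (idx + 1) % TG3_TX_RING_SIZE;		// generic wrap-around
 *	next = (idx + 1) & (TG3_TX_RING_SIZE - 1);	// same result, one AND
 *
 * This is why the sizes are kept as constants rather than fields in
 * struct tg3, per the comment above TG3_RX_RCB_RING_SIZE().
 */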
#define TG3_RX_DMA_ALIGN		16
#define TG3_RX_HEADROOM			ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_RING_SIZE)

#define TG3_RX_JMB_BUFF_RING_SIZE \
	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
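/* Worked example (explanatory note, not from the original source): with
 * TG3_DMA_BYTE_ENAB == 64, a standard RX buffer is DMA-mapped as
 * 1536 + 64 = 1600 bytes and a jumbo buffer as 9046 + 64 = 9110 bytes,
 * i.e. each mapping covers slightly more than the usable frame data.
 */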
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif
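/* Rough sketch of how a copy-break threshold like this is typically consulted
 * on receive (illustrative pseudo-code, not the driver's actual rx path):
 *
 *	if (len < TG3_RX_COPY_THRESH(tp)) {
 *		// allocate a small skb and memcpy the frame into it,
 *		// leaving the original DMA buffer in the ring
 *	} else {
 *		// unmap the DMA buffer and hand it up as-is
 *	}
 *
 * On the 5701 in PCIX mode the copy path doubles as the alignment
 * workaround described in the comment above.
 */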
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

#define TG3_RAW_IP_ALIGN 2
/* number of ETHTOOL_GSTATS u64's */
#define TG3_NUM_STATS		(sizeof(struct tg3_ethtool_stats)/sizeof(u64))

#define TG3_NUM_TEST		6

#define TG3_FW_UPDATE_TIMEOUT_SEC	5

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
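/* Usage note (not from the original source): tg3_debug is an ordinary module
 * parameter, so the message bitmap can be set when the module is loaded, e.g.
 *
 *	modprobe tg3 tg3_debug=0x3
 *
 * while the default of -1 defers to TG3_DEF_MSG_ENABLE.  The individual bits
 * correspond to the NETIF_MSG_* flags from <linux/netdevice.h>.
 */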
197 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
214 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
215 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
216 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
217 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
218 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
219 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
220 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
221 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
222 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
223 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
224 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
225 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
226 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
227 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
228 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
274 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
275 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
276 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
277 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
278 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
279 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
280 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
284 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
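/* Explanatory note (not from the original source): MODULE_DEVICE_TABLE()
 * exports the PCI ID list above as module aliases, which is what lets
 * udev/modprobe autoload tg3 when a matching vendor:device pair appears;
 * "modinfo tg3" shows one alias line per entry of tg3_pci_tbl.
 */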
286 static const struct {
287 const char string[ETH_GSTRING_LEN];
288 } ethtool_stats_keys[TG3_NUM_STATS] = {
291 { "rx_ucast_packets" },
292 { "rx_mcast_packets" },
293 { "rx_bcast_packets" },
295 { "rx_align_errors" },
296 { "rx_xon_pause_rcvd" },
297 { "rx_xoff_pause_rcvd" },
298 { "rx_mac_ctrl_rcvd" },
299 { "rx_xoff_entered" },
300 { "rx_frame_too_long_errors" },
302 { "rx_undersize_packets" },
303 { "rx_in_length_errors" },
304 { "rx_out_length_errors" },
305 { "rx_64_or_less_octet_packets" },
306 { "rx_65_to_127_octet_packets" },
307 { "rx_128_to_255_octet_packets" },
308 { "rx_256_to_511_octet_packets" },
309 { "rx_512_to_1023_octet_packets" },
310 { "rx_1024_to_1522_octet_packets" },
311 { "rx_1523_to_2047_octet_packets" },
312 { "rx_2048_to_4095_octet_packets" },
313 { "rx_4096_to_8191_octet_packets" },
314 { "rx_8192_to_9022_octet_packets" },
321 { "tx_flow_control" },
323 { "tx_single_collisions" },
324 { "tx_mult_collisions" },
326 { "tx_excessive_collisions" },
327 { "tx_late_collisions" },
328 { "tx_collide_2times" },
329 { "tx_collide_3times" },
330 { "tx_collide_4times" },
331 { "tx_collide_5times" },
332 { "tx_collide_6times" },
333 { "tx_collide_7times" },
334 { "tx_collide_8times" },
335 { "tx_collide_9times" },
336 { "tx_collide_10times" },
337 { "tx_collide_11times" },
338 { "tx_collide_12times" },
339 { "tx_collide_13times" },
340 { "tx_collide_14times" },
341 { "tx_collide_15times" },
342 { "tx_ucast_packets" },
343 { "tx_mcast_packets" },
344 { "tx_bcast_packets" },
345 { "tx_carrier_sense_errors" },
349 { "dma_writeq_full" },
350 { "dma_write_prioq_full" },
354 { "rx_threshold_hit" },
356 { "dma_readq_full" },
357 { "dma_read_prioq_full" },
358 { "tx_comp_queue_full" },
360 { "ring_set_send_prod_index" },
361 { "ring_status_update" },
363 { "nic_avoided_irqs" },
364 { "nic_tx_threshold_hit" }
367 static const struct {
368 const char string[ETH_GSTRING_LEN];
369 } ethtool_test_keys[TG3_NUM_TEST] = {
370 { "nvram test (online) " },
371 { "link test (online) " },
372 { "register test (offline)" },
373 { "memory test (offline)" },
374 { "loopback test (offline)" },
375 { "interrupt test (offline)" },
378 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
380 writel(val, tp->regs + off);
383 static u32 tg3_read32(struct tg3 *tp, u32 off)
385 return readl(tp->regs + off);
388 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
390 writel(val, tp->aperegs + off);
393 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
395 return readl(tp->aperegs + off);
398 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
402 spin_lock_irqsave(&tp->indirect_lock, flags);
403 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
404 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
405 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
410 writel(val, tp->regs + off);
411 readl(tp->regs + off);
414 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
419 spin_lock_irqsave(&tp->indirect_lock, flags);
420 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
421 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
422 spin_unlock_irqrestore(&tp->indirect_lock, flags);
426 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
430 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
431 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
432 TG3_64BIT_REG_LOW, val);
435 if (off == TG3_RX_STD_PROD_IDX_REG) {
436 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
437 TG3_64BIT_REG_LOW, val);
441 spin_lock_irqsave(&tp->indirect_lock, flags);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
443 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
444 spin_unlock_irqrestore(&tp->indirect_lock, flags);
	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
449 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
451 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
452 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
456 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
461 spin_lock_irqsave(&tp->indirect_lock, flags);
462 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
463 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
464 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
473 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
475 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
476 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
477 /* Non-posted methods */
478 tp->write32(tp, off, val);
481 tg3_write32(tp, off, val);
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
493 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
495 tp->write32_mbox(tp, off, val);
496 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
497 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
498 tp->read32_mbox(tp, off);
501 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
503 void __iomem *mbox = tp->regs + off;
505 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
507 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
511 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
513 return readl(tp->regs + off + GRCMBOX_BASE);
516 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
518 writel(val, tp->regs + off + GRCMBOX_BASE);
521 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
522 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
523 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
524 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
525 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
527 #define tw32(reg, val) tp->write32(tp, reg, val)
528 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
529 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
530 #define tr32(reg) tp->read32(tp, reg)
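/* Illustrative usage of the accessor macros above (not from the original
 * source): tw32() issues a plain posted write, tw32_f() reads the register
 * back to flush the write, and tw32_wait_f() additionally waits the given
 * number of microseconds, e.g.
 *
 *	tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);	// posted write
 *	tw32_f(MAC_MI_MODE, tp->mi_mode);			// write + flush
 *	tw32_wait_f(GRC_LOCAL_CTRL, val, 100);			// write + flush + 100us
 *
 * The flushing variants matter for registers that need a read-back or a
 * settling delay before the next access, per the usec_wait comment above.
 */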
532 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
536 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
537 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
540 spin_lock_irqsave(&tp->indirect_lock, flags);
541 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
542 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
543 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
545 /* Always leave this as zero. */
546 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
548 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
549 tw32_f(TG3PCI_MEM_WIN_DATA, val);
551 /* Always leave this as zero. */
552 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
554 spin_unlock_irqrestore(&tp->indirect_lock, flags);
557 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
561 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
562 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
567 spin_lock_irqsave(&tp->indirect_lock, flags);
568 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
569 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
570 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
572 /* Always leave this as zero. */
573 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
575 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
576 *val = tr32(TG3PCI_MEM_WIN_DATA);
578 /* Always leave this as zero. */
579 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
581 spin_unlock_irqrestore(&tp->indirect_lock, flags);
584 static void tg3_ape_lock_init(struct tg3 *tp)
589 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
590 regbase = TG3_APE_LOCK_GRANT;
592 regbase = TG3_APE_PER_LOCK_GRANT;
594 /* Make sure the driver hasn't any stale locks. */
595 for (i = 0; i < 8; i++)
596 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
599 static int tg3_ape_lock(struct tg3 *tp, int locknum)
603 u32 status, req, gnt;
605 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
609 case TG3_APE_LOCK_GRC:
610 case TG3_APE_LOCK_MEM:
616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
617 req = TG3_APE_LOCK_REQ;
618 gnt = TG3_APE_LOCK_GRANT;
620 req = TG3_APE_PER_LOCK_REQ;
621 gnt = TG3_APE_PER_LOCK_GRANT;
626 tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);
628 /* Wait for up to 1 millisecond to acquire lock. */
629 for (i = 0; i < 100; i++) {
630 status = tg3_ape_read32(tp, gnt + off);
631 if (status == APE_LOCK_GRANT_DRIVER)
636 if (status != APE_LOCK_GRANT_DRIVER) {
637 /* Revoke the lock request. */
638 tg3_ape_write32(tp, gnt + off,
639 APE_LOCK_GRANT_DRIVER);
647 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
651 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
655 case TG3_APE_LOCK_GRC:
656 case TG3_APE_LOCK_MEM:
662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
663 gnt = TG3_APE_LOCK_GRANT;
665 gnt = TG3_APE_PER_LOCK_GRANT;
667 tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
670 static void tg3_disable_ints(struct tg3 *tp)
674 tw32(TG3PCI_MISC_HOST_CTRL,
675 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
676 for (i = 0; i < tp->irq_max; i++)
677 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
680 static void tg3_enable_ints(struct tg3 *tp)
687 tw32(TG3PCI_MISC_HOST_CTRL,
688 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
690 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
691 for (i = 0; i < tp->irq_cnt; i++) {
692 struct tg3_napi *tnapi = &tp->napi[i];
694 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
695 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
696 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
698 tp->coal_now |= tnapi->coal_now;
701 /* Force an initial interrupt */
702 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
703 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
704 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
706 tw32(HOSTCC_MODE, tp->coal_now);
708 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
711 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
713 struct tg3 *tp = tnapi->tp;
714 struct tg3_hw_status *sblk = tnapi->hw_status;
715 unsigned int work_exists = 0;
717 /* check for phy events */
718 if (!(tp->tg3_flags &
719 (TG3_FLAG_USE_LINKCHG_REG |
720 TG3_FLAG_POLL_SERDES))) {
721 if (sblk->status & SD_STATUS_LINK_CHG)
724 /* check for RX/TX work to do */
725 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
726 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
733 * similar to tg3_enable_ints, but it accurately determines whether there
734 * is new work pending and can return without flushing the PIO write
735 * which reenables interrupts
737 static void tg3_int_reenable(struct tg3_napi *tnapi)
739 struct tg3 *tp = tnapi->tp;
741 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
748 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
750 tw32(HOSTCC_MODE, tp->coalesce_mode |
751 HOSTCC_MODE_ENABLE | tnapi->coal_now);
754 static void tg3_napi_disable(struct tg3 *tp)
758 for (i = tp->irq_cnt - 1; i >= 0; i--)
759 napi_disable(&tp->napi[i].napi);
762 static void tg3_napi_enable(struct tg3 *tp)
766 for (i = 0; i < tp->irq_cnt; i++)
767 napi_enable(&tp->napi[i].napi);
770 static inline void tg3_netif_stop(struct tg3 *tp)
772 tp->dev->trans_start = jiffies; /* prevent tx timeout */
773 tg3_napi_disable(tp);
774 netif_tx_disable(tp->dev);
777 static inline void tg3_netif_start(struct tg3 *tp)
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
783 netif_tx_wake_all_queues(tp->dev);
786 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
790 static void tg3_switch_clocks(struct tg3 *tp)
795 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
796 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
799 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
801 orig_clock_ctrl = clock_ctrl;
802 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
803 CLOCK_CTRL_CLKRUN_OENABLE |
805 tp->pci_clock_ctrl = clock_ctrl;
807 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
808 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
809 tw32_wait_f(TG3PCI_CLOCK_CTRL,
810 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
812 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
813 tw32_wait_f(TG3PCI_CLOCK_CTRL,
815 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
817 tw32_wait_f(TG3PCI_CLOCK_CTRL,
818 clock_ctrl | (CLOCK_CTRL_ALTCLK),
821 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
824 #define PHY_BUSY_LOOPS 5000
826 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
832 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
834 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
840 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
841 MI_COM_PHY_ADDR_MASK);
842 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
843 MI_COM_REG_ADDR_MASK);
844 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
846 tw32_f(MAC_MI_COM, frame_val);
848 loops = PHY_BUSY_LOOPS;
851 frame_val = tr32(MAC_MI_COM);
853 if ((frame_val & MI_COM_BUSY) == 0) {
855 frame_val = tr32(MAC_MI_COM);
863 *val = frame_val & MI_COM_DATA_MASK;
867 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
868 tw32_f(MAC_MI_MODE, tp->mi_mode);
875 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
881 if ((tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
882 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
885 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
887 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
891 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
892 MI_COM_PHY_ADDR_MASK);
893 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
894 MI_COM_REG_ADDR_MASK);
895 frame_val |= (val & MI_COM_DATA_MASK);
896 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
898 tw32_f(MAC_MI_COM, frame_val);
900 loops = PHY_BUSY_LOOPS;
903 frame_val = tr32(MAC_MI_COM);
904 if ((frame_val & MI_COM_BUSY) == 0) {
906 frame_val = tr32(MAC_MI_COM);
916 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
917 tw32_f(MAC_MI_MODE, tp->mi_mode);
924 static int tg3_bmcr_reset(struct tg3 *tp)
	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
932 phy_control = BMCR_RESET;
933 err = tg3_writephy(tp, MII_BMCR, phy_control);
939 err = tg3_readphy(tp, MII_BMCR, &phy_control);
943 if ((phy_control & BMCR_RESET) == 0) {
955 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
957 struct tg3 *tp = bp->priv;
960 spin_lock_bh(&tp->lock);
962 if (tg3_readphy(tp, reg, &val))
965 spin_unlock_bh(&tp->lock);
970 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
972 struct tg3 *tp = bp->priv;
975 spin_lock_bh(&tp->lock);
977 if (tg3_writephy(tp, reg, val))
980 spin_unlock_bh(&tp->lock);
985 static int tg3_mdio_reset(struct mii_bus *bp)
990 static void tg3_mdio_config_5785(struct tg3 *tp)
993 struct phy_device *phydev;
995 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
996 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
997 case PHY_ID_BCM50610:
998 case PHY_ID_BCM50610M:
999 val = MAC_PHYCFG2_50610_LED_MODES;
1001 case PHY_ID_BCMAC131:
1002 val = MAC_PHYCFG2_AC131_LED_MODES;
1004 case PHY_ID_RTL8211C:
1005 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1007 case PHY_ID_RTL8201E:
1008 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1014 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1015 tw32(MAC_PHYCFG2, val);
1017 val = tr32(MAC_PHYCFG1);
1018 val &= ~(MAC_PHYCFG1_RGMII_INT |
1019 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1020 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1021 tw32(MAC_PHYCFG1, val);
1026 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
1027 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1028 MAC_PHYCFG2_FMODE_MASK_MASK |
1029 MAC_PHYCFG2_GMODE_MASK_MASK |
1030 MAC_PHYCFG2_ACT_MASK_MASK |
1031 MAC_PHYCFG2_QUAL_MASK_MASK |
1032 MAC_PHYCFG2_INBAND_ENABLE;
1034 tw32(MAC_PHYCFG2, val);
1036 val = tr32(MAC_PHYCFG1);
1037 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1038 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1039 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1040 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1041 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1042 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1043 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1045 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1046 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1047 tw32(MAC_PHYCFG1, val);
1049 val = tr32(MAC_EXT_RGMII_MODE);
1050 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1051 MAC_RGMII_MODE_RX_QUALITY |
1052 MAC_RGMII_MODE_RX_ACTIVITY |
1053 MAC_RGMII_MODE_RX_ENG_DET |
1054 MAC_RGMII_MODE_TX_ENABLE |
1055 MAC_RGMII_MODE_TX_LOWPWR |
1056 MAC_RGMII_MODE_TX_RESET);
1057 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
1058 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1059 val |= MAC_RGMII_MODE_RX_INT_B |
1060 MAC_RGMII_MODE_RX_QUALITY |
1061 MAC_RGMII_MODE_RX_ACTIVITY |
1062 MAC_RGMII_MODE_RX_ENG_DET;
1063 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1064 val |= MAC_RGMII_MODE_TX_ENABLE |
1065 MAC_RGMII_MODE_TX_LOWPWR |
1066 MAC_RGMII_MODE_TX_RESET;
1068 tw32(MAC_EXT_RGMII_MODE, val);
1071 static void tg3_mdio_start(struct tg3 *tp)
1073 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1074 tw32_f(MAC_MI_MODE, tp->mi_mode);
1077 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1078 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1079 tg3_mdio_config_5785(tp);
1082 static int tg3_mdio_init(struct tg3 *tp)
1086 struct phy_device *phydev;
1088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
1092 tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;
1094 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1095 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1097 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1098 TG3_CPMU_PHY_STRAP_IS_SERDES;
1102 tp->phy_addr = TG3_PHY_MII_ADDR;
1106 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
1107 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
1110 tp->mdio_bus = mdiobus_alloc();
1111 if (tp->mdio_bus == NULL)
1114 tp->mdio_bus->name = "tg3 mdio bus";
1115 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1116 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1117 tp->mdio_bus->priv = tp;
1118 tp->mdio_bus->parent = &tp->pdev->dev;
1119 tp->mdio_bus->read = &tg3_mdio_read;
1120 tp->mdio_bus->write = &tg3_mdio_write;
1121 tp->mdio_bus->reset = &tg3_mdio_reset;
1122 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1123 tp->mdio_bus->irq = &tp->mdio_irq[0];
1125 for (i = 0; i < PHY_MAX_ADDR; i++)
1126 tp->mdio_bus->irq[i] = PHY_POLL;
	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1136 i = mdiobus_register(tp->mdio_bus);
1138 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1139 mdiobus_free(tp->mdio_bus);
1143 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1145 if (!phydev || !phydev->drv) {
1146 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1147 mdiobus_unregister(tp->mdio_bus);
1148 mdiobus_free(tp->mdio_bus);
1152 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1153 case PHY_ID_BCM57780:
1154 phydev->interface = PHY_INTERFACE_MODE_GMII;
1155 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1157 case PHY_ID_BCM50610:
1158 case PHY_ID_BCM50610M:
1159 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1160 PHY_BRCM_RX_REFCLK_UNUSED |
1161 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1162 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1163 if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
1164 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1165 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1166 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1167 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1168 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1170 case PHY_ID_RTL8211C:
1171 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1173 case PHY_ID_RTL8201E:
1174 case PHY_ID_BCMAC131:
1175 phydev->interface = PHY_INTERFACE_MODE_MII;
1176 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1177 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1181 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1184 tg3_mdio_config_5785(tp);
1189 static void tg3_mdio_fini(struct tg3 *tp)
1191 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1192 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1193 mdiobus_unregister(tp->mdio_bus);
1194 mdiobus_free(tp->mdio_bus);
1198 /* tp->lock is held. */
1199 static inline void tg3_generate_fw_event(struct tg3 *tp)
1203 val = tr32(GRC_RX_CPU_EVENT);
1204 val |= GRC_RX_CPU_DRIVER_EVENT;
1205 tw32_f(GRC_RX_CPU_EVENT, val);
1207 tp->last_event_jiffies = jiffies;
1210 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1212 /* tp->lock is held. */
1213 static void tg3_wait_for_event_ack(struct tg3 *tp)
1216 unsigned int delay_cnt;
1219 /* If enough time has passed, no wait is necessary. */
1220 time_remain = (long)(tp->last_event_jiffies + 1 +
1221 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1223 if (time_remain < 0)
1226 /* Check if we can shorten the wait time. */
1227 delay_cnt = jiffies_to_usecs(time_remain);
1228 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1229 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1230 delay_cnt = (delay_cnt >> 3) + 1;
1232 for (i = 0; i < delay_cnt; i++) {
1233 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
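/* Worked example (explanatory note, not from the original source): with
 * TG3_FW_EVENT_TIMEOUT_USEC == 2500 and the previous event long expired,
 * delay_cnt is clamped to 2500 and then becomes (2500 >> 3) + 1 = 313
 * iterations.  The shift by 3 suggests each poll of GRC_RX_CPU_EVENT is
 * paired with a delay on the order of 8 usec (the delay itself falls outside
 * this excerpt), so the loop spans roughly the 2.5 ms firmware timeout.
 */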
1239 /* tp->lock is held. */
1240 static void tg3_ump_link_report(struct tg3 *tp)
1245 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1246 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1249 tg3_wait_for_event_ack(tp);
1251 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1253 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	if (!tg3_readphy(tp, MII_BMCR, &reg))
	if (!tg3_readphy(tp, MII_BMSR, &reg))
1259 val |= (reg & 0xffff);
1260 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
	if (!tg3_readphy(tp, MII_LPA, &reg))
1266 val |= (reg & 0xffff);
1267 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1270 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
	if (!tg3_readphy(tp, MII_CTRL1000, &reg))
	if (!tg3_readphy(tp, MII_STAT1000, &reg))
1274 val |= (reg & 0xffff);
1276 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1282 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1284 tg3_generate_fw_event(tp);
1287 static void tg3_link_report(struct tg3 *tp)
1289 if (!netif_carrier_ok(tp->dev)) {
1290 netif_info(tp, link, tp->dev, "Link is down\n");
1291 tg3_ump_link_report(tp);
1292 } else if (netif_msg_link(tp)) {
1293 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1294 (tp->link_config.active_speed == SPEED_1000 ?
1296 (tp->link_config.active_speed == SPEED_100 ?
1298 (tp->link_config.active_duplex == DUPLEX_FULL ?
1301 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1302 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1304 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1306 tg3_ump_link_report(tp);
1310 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1314 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1315 miireg = ADVERTISE_PAUSE_CAP;
1316 else if (flow_ctrl & FLOW_CTRL_TX)
1317 miireg = ADVERTISE_PAUSE_ASYM;
1318 else if (flow_ctrl & FLOW_CTRL_RX)
1319 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1326 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1330 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1331 miireg = ADVERTISE_1000XPAUSE;
1332 else if (flow_ctrl & FLOW_CTRL_TX)
1333 miireg = ADVERTISE_1000XPSE_ASYM;
1334 else if (flow_ctrl & FLOW_CTRL_RX)
1335 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1342 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1346 if (lcladv & ADVERTISE_1000XPAUSE) {
1347 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1348 if (rmtadv & LPA_1000XPAUSE)
1349 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1350 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1353 if (rmtadv & LPA_1000XPAUSE)
1354 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1356 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1357 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1364 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1368 u32 old_rx_mode = tp->rx_mode;
1369 u32 old_tx_mode = tp->tx_mode;
1371 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1372 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1374 autoneg = tp->link_config.autoneg;
1376 if (autoneg == AUTONEG_ENABLE &&
1377 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1378 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1379 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1381 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1383 flowctrl = tp->link_config.flowctrl;
1385 tp->link_config.active_flowctrl = flowctrl;
1387 if (flowctrl & FLOW_CTRL_RX)
1388 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1390 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1392 if (old_rx_mode != tp->rx_mode)
1393 tw32_f(MAC_RX_MODE, tp->rx_mode);
1395 if (flowctrl & FLOW_CTRL_TX)
1396 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1398 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1400 if (old_tx_mode != tp->tx_mode)
1401 tw32_f(MAC_TX_MODE, tp->tx_mode);
1404 static void tg3_adjust_link(struct net_device *dev)
1406 u8 oldflowctrl, linkmesg = 0;
1407 u32 mac_mode, lcl_adv, rmt_adv;
1408 struct tg3 *tp = netdev_priv(dev);
1409 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1411 spin_lock_bh(&tp->lock);
1413 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1414 MAC_MODE_HALF_DUPLEX);
1416 oldflowctrl = tp->link_config.active_flowctrl;
1422 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1423 mac_mode |= MAC_MODE_PORT_MODE_MII;
1424 else if (phydev->speed == SPEED_1000 ||
1425 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1426 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1428 mac_mode |= MAC_MODE_PORT_MODE_MII;
1430 if (phydev->duplex == DUPLEX_HALF)
1431 mac_mode |= MAC_MODE_HALF_DUPLEX;
1433 lcl_adv = tg3_advert_flowctrl_1000T(
1434 tp->link_config.flowctrl);
1437 rmt_adv = LPA_PAUSE_CAP;
1438 if (phydev->asym_pause)
1439 rmt_adv |= LPA_PAUSE_ASYM;
1442 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1444 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1446 if (mac_mode != tp->mac_mode) {
1447 tp->mac_mode = mac_mode;
1448 tw32_f(MAC_MODE, tp->mac_mode);
1452 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1453 if (phydev->speed == SPEED_10)
1455 MAC_MI_STAT_10MBPS_MODE |
1456 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1458 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1461 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1462 tw32(MAC_TX_LENGTHS,
1463 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1464 (6 << TX_LENGTHS_IPG_SHIFT) |
1465 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1467 tw32(MAC_TX_LENGTHS,
1468 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1469 (6 << TX_LENGTHS_IPG_SHIFT) |
1470 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1472 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1473 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1474 phydev->speed != tp->link_config.active_speed ||
1475 phydev->duplex != tp->link_config.active_duplex ||
1476 oldflowctrl != tp->link_config.active_flowctrl)
1479 tp->link_config.active_speed = phydev->speed;
1480 tp->link_config.active_duplex = phydev->duplex;
1482 spin_unlock_bh(&tp->lock);
1485 tg3_link_report(tp);
1488 static int tg3_phy_init(struct tg3 *tp)
1490 struct phy_device *phydev;
1492 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1495 /* Bring the PHY back to a known state. */
1498 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1500 /* Attach the MAC to the PHY. */
1501 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1502 phydev->dev_flags, phydev->interface);
1503 if (IS_ERR(phydev)) {
1504 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1505 return PTR_ERR(phydev);
1508 /* Mask with MAC supported features. */
1509 switch (phydev->interface) {
1510 case PHY_INTERFACE_MODE_GMII:
1511 case PHY_INTERFACE_MODE_RGMII:
1512 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1513 phydev->supported &= (PHY_GBIT_FEATURES |
1515 SUPPORTED_Asym_Pause);
1519 case PHY_INTERFACE_MODE_MII:
1520 phydev->supported &= (PHY_BASIC_FEATURES |
1522 SUPPORTED_Asym_Pause);
1525 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1529 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1531 phydev->advertising = phydev->supported;
1536 static void tg3_phy_start(struct tg3 *tp)
1538 struct phy_device *phydev;
1540 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1543 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1545 if (tp->link_config.phy_is_low_power) {
1546 tp->link_config.phy_is_low_power = 0;
1547 phydev->speed = tp->link_config.orig_speed;
1548 phydev->duplex = tp->link_config.orig_duplex;
1549 phydev->autoneg = tp->link_config.orig_autoneg;
1550 phydev->advertising = tp->link_config.orig_advertising;
1555 phy_start_aneg(phydev);
1558 static void tg3_phy_stop(struct tg3 *tp)
1560 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1563 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1566 static void tg3_phy_fini(struct tg3 *tp)
1568 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1569 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1570 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1574 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1576 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1577 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1580 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1584 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1587 tg3_writephy(tp, MII_TG3_FET_TEST,
1588 phytest | MII_TG3_FET_SHADOW_EN);
1589 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1591 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1593 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1594 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1596 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1600 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1604 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1605 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1607 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
1610 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1611 tg3_phy_fet_toggle_apd(tp, enable);
1615 reg = MII_TG3_MISC_SHDW_WREN |
1616 MII_TG3_MISC_SHDW_SCR5_SEL |
1617 MII_TG3_MISC_SHDW_SCR5_LPED |
1618 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1619 MII_TG3_MISC_SHDW_SCR5_SDTL |
1620 MII_TG3_MISC_SHDW_SCR5_C125OE;
1621 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1622 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1624 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1627 reg = MII_TG3_MISC_SHDW_WREN |
1628 MII_TG3_MISC_SHDW_APD_SEL |
1629 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
1631 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
1633 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1636 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1640 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1641 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1644 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
1647 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
1648 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
1650 tg3_writephy(tp, MII_TG3_FET_TEST,
1651 ephy | MII_TG3_FET_SHADOW_EN);
1652 if (!tg3_readphy(tp, reg, &phy)) {
1654 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1656 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
1657 tg3_writephy(tp, reg, phy);
1659 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
1662 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1663 MII_TG3_AUXCTL_SHDWSEL_MISC;
1664 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1665 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1667 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1669 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1670 phy |= MII_TG3_AUXCTL_MISC_WREN;
1671 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1676 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1680 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1683 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1684 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1685 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1686 (val | (1 << 15) | (1 << 4)));
1689 static void tg3_phy_apply_otp(struct tg3 *tp)
1698 /* Enable SM_DSP clock and tx 6dB coding. */
1699 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1700 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1701 MII_TG3_AUXCTL_ACTL_TX_6DB;
1702 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1704 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1705 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1706 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1708 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1709 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1710 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1712 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1713 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1714 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1716 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1717 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1719 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1720 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1722 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1723 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1724 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1726 /* Turn off SM_DSP clock. */
1727 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1728 MII_TG3_AUXCTL_ACTL_TX_6DB;
1729 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1732 static int tg3_wait_macro_done(struct tg3 *tp)
1739 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1740 if ((tmp32 & 0x1000) == 0)
1750 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1752 static const u32 test_pat[4][6] = {
1753 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1754 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1755 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1756 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1760 for (chan = 0; chan < 4; chan++) {
1763 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1764 (chan * 0x2000) | 0x0200);
1765 tg3_writephy(tp, 0x16, 0x0002);
1767 for (i = 0; i < 6; i++)
1768 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1771 tg3_writephy(tp, 0x16, 0x0202);
1772 if (tg3_wait_macro_done(tp)) {
1777 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1778 (chan * 0x2000) | 0x0200);
1779 tg3_writephy(tp, 0x16, 0x0082);
1780 if (tg3_wait_macro_done(tp)) {
1785 tg3_writephy(tp, 0x16, 0x0802);
1786 if (tg3_wait_macro_done(tp)) {
1791 for (i = 0; i < 6; i += 2) {
1794 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1795 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1796 tg3_wait_macro_done(tp)) {
1802 if (low != test_pat[chan][i] ||
1803 high != test_pat[chan][i+1]) {
1804 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1805 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1806 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
1816 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1820 for (chan = 0; chan < 4; chan++) {
1823 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1824 (chan * 0x2000) | 0x0200);
1825 tg3_writephy(tp, 0x16, 0x0002);
1826 for (i = 0; i < 6; i++)
1827 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1828 tg3_writephy(tp, 0x16, 0x0202);
1829 if (tg3_wait_macro_done(tp))
1836 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1838 u32 reg32, phy9_orig;
1839 int retries, do_phy_reset, err;
1845 err = tg3_bmcr_reset(tp);
1851 /* Disable transmitter and interrupt. */
	if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
1856 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1858 /* Set full-duplex, 1000 mbps. */
1859 tg3_writephy(tp, MII_BMCR,
1860 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1862 /* Set to master mode. */
1863 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1866 tg3_writephy(tp, MII_TG3_CTRL,
1867 (MII_TG3_CTRL_AS_MASTER |
1868 MII_TG3_CTRL_ENABLE_AS_MASTER));
1870 /* Enable SM_DSP_CLOCK and 6dB. */
1871 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1873 /* Block the PHY control access. */
1874 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1875 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1877 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1880 } while (--retries);
1882 err = tg3_phy_reset_chanpat(tp);
1886 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1887 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1889 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1890 tg3_writephy(tp, 0x16, 0x0000);
1892 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1893 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1894 /* Set Extended packet length bit for jumbo frames */
1895 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1897 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1900 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
1904 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
/* This will reset the tigon3 PHY if there is no valid
 * link unless the FORCE argument is non-zero.
 */
1914 static int tg3_phy_reset(struct tg3 *tp)
1920 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1923 val = tr32(GRC_MISC_CFG);
1924 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1927 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1928 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1932 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1933 netif_carrier_off(tp->dev);
1934 tg3_link_report(tp);
1937 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1938 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1939 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1940 err = tg3_phy_reset_5703_4_5(tp);
1947 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1948 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1949 cpmuctrl = tr32(TG3_CPMU_CTRL);
1950 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1952 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1955 err = tg3_bmcr_reset(tp);
1959 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1962 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1963 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1965 tw32(TG3_CPMU_CTRL, cpmuctrl);
1968 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1969 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1972 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1973 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1974 CPMU_LSPD_1000MB_MACCLK_12_5) {
1975 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1977 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1981 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
1982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) &&
1983 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
1986 tg3_phy_apply_otp(tp);
1988 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
1989 tg3_phy_toggle_apd(tp, true);
1991 tg3_phy_toggle_apd(tp, false);
1994 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1995 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1996 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1997 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1998 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1999 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
2000 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2002 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
2003 tg3_writephy(tp, 0x1c, 0x8d68);
2004 tg3_writephy(tp, 0x1c, 0x8d68);
2006 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
2007 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2008 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2009 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
2010 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2011 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
2012 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
2013 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
2014 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2015 } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
2016 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
2017 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2018 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
2019 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2020 tg3_writephy(tp, MII_TG3_TEST1,
2021 MII_TG3_TEST1_TRIM_EN | 0x4);
2023 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2024 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
2026 /* Set Extended packet length bit (bit 14) on all chips that */
2027 /* support jumbo frames */
2028 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2029 /* Cannot do read-modify-write on 5401 */
2030 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2031 } else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2034 /* Set bit 14 with read-modify-write to preserve other bits */
2035 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
2036 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
2037 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
2043 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
2046 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2047 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2048 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2051 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2052 /* adjust output voltage */
2053 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2056 tg3_phy_toggle_automdix(tp, 1);
2057 tg3_phy_set_wirespeed(tp);
2061 static void tg3_frob_aux_power(struct tg3 *tp)
2063 struct tg3 *tp_peer = tp;
2065 /* The GPIOs do something completely different on 57765. */
2066 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
2067 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2068 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
2071 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2072 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
2073 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
2074 struct net_device *dev_peer;
2076 dev_peer = pci_get_drvdata(tp->pdev_peer);
2077 /* remove_one() may have been run on the peer. */
2081 tp_peer = netdev_priv(dev_peer);
2084 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2085 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
2086 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
2087 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
2088 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2090 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2091 (GRC_LCLCTRL_GPIO_OE0 |
2092 GRC_LCLCTRL_GPIO_OE1 |
2093 GRC_LCLCTRL_GPIO_OE2 |
2094 GRC_LCLCTRL_GPIO_OUTPUT0 |
2095 GRC_LCLCTRL_GPIO_OUTPUT1),
2097 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2098 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2099 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2100 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2101 GRC_LCLCTRL_GPIO_OE1 |
2102 GRC_LCLCTRL_GPIO_OE2 |
2103 GRC_LCLCTRL_GPIO_OUTPUT0 |
2104 GRC_LCLCTRL_GPIO_OUTPUT1 |
2106 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2108 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2109 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2111 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2112 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
2115 u32 grc_local_ctrl = 0;
2117 if (tp_peer != tp &&
2118 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2121 /* Workaround to prevent overdrawing Amps. */
2122 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2124 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2125 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2126 grc_local_ctrl, 100);
2129 /* On 5753 and variants, GPIO2 cannot be used. */
2130 no_gpio2 = tp->nic_sram_data_cfg &
2131 NIC_SRAM_DATA_CFG_NO_GPIO2;
2133 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2134 GRC_LCLCTRL_GPIO_OE1 |
2135 GRC_LCLCTRL_GPIO_OE2 |
2136 GRC_LCLCTRL_GPIO_OUTPUT1 |
2137 GRC_LCLCTRL_GPIO_OUTPUT2;
2139 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2140 GRC_LCLCTRL_GPIO_OUTPUT2);
2142 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2143 grc_local_ctrl, 100);
2145 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2147 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2148 grc_local_ctrl, 100);
2151 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2152 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2153 grc_local_ctrl, 100);
2157 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
2158 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
2159 if (tp_peer != tp &&
2160 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
2163 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2164 (GRC_LCLCTRL_GPIO_OE1 |
2165 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2167 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2168 GRC_LCLCTRL_GPIO_OE1, 100);
2170 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2171 (GRC_LCLCTRL_GPIO_OE1 |
2172 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
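/* Decide whether MAC_MODE_LINK_POLARITY should be set for the given link
 * speed on 5700-family devices; the answer depends on the LED mode and on
 * whether a BCM5411 PHY is attached.
 */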
2177 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2179 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2181 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2182 if (speed != SPEED_10)
2184 } else if (speed == SPEED_10)
2190 static int tg3_setup_phy(struct tg3 *, int);
2192 #define RESET_KIND_SHUTDOWN 0
2193 #define RESET_KIND_INIT 1
2194 #define RESET_KIND_SUSPEND 2
2196 static void tg3_write_sig_post_reset(struct tg3 *, int);
2197 static int tg3_halt_cpu(struct tg3 *, u32);
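/* Power down the PHY (or SerDes) before entering a low-power state,
 * applying per-chip quirks: 5704 SerDes configuration, 5906 EPHY IDDQ
 * mode, FET shadow-register power-down, and a MAC clock adjustment on
 * 5784/5761 AX parts.  Chips where a hard BMCR power-down is unsafe are
 * skipped.
 */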
2199 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2203 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2204 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2205 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2206 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2209 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2210 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2211 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2218 val = tr32(GRC_MISC_CFG);
2219 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2222 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2224 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2227 tg3_writephy(tp, MII_ADVERTISE, 0);
2228 tg3_writephy(tp, MII_BMCR,
2229 BMCR_ANENABLE | BMCR_ANRESTART);
2231 tg3_writephy(tp, MII_TG3_FET_TEST,
2232 phytest | MII_TG3_FET_SHADOW_EN);
2233 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2234 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2236 MII_TG3_FET_SHDW_AUXMODE4,
2239 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2242 } else if (do_low_power) {
2243 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2244 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2246 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2247 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2248 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2249 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2250 MII_TG3_AUXCTL_PCTL_VREG_11V);
2253 /* The PHY should not be powered down on some chips because
2256 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2257 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2258 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2259 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2262 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2263 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2264 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2265 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2266 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2267 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2270 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
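/* NVRAM access is arbitrated in hardware through the NVRAM_SWARB
 * register.  tg3_nvram_lock() below requests the arbitration grant and
 * keeps a reference count so nested lock/unlock pairs behave as expected.
 */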
2273 /* tp->lock is held. */
2274 static int tg3_nvram_lock(struct tg3 *tp)
2276 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2279 if (tp->nvram_lock_cnt == 0) {
2280 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2281 for (i = 0; i < 8000; i++) {
2282 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2287 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2291 tp->nvram_lock_cnt++;
2296 /* tp->lock is held. */
2297 static void tg3_nvram_unlock(struct tg3 *tp)
2299 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
2300 if (tp->nvram_lock_cnt > 0)
2301 tp->nvram_lock_cnt--;
2302 if (tp->nvram_lock_cnt == 0)
2303 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2307 /* tp->lock is held. */
2308 static void tg3_enable_nvram_access(struct tg3 *tp)
2310 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2311 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2312 u32 nvaccess = tr32(NVRAM_ACCESS);
2314 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2318 /* tp->lock is held. */
2319 static void tg3_disable_nvram_access(struct tg3 *tp)
2321 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2322 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2323 u32 nvaccess = tr32(NVRAM_ACCESS);
2325 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2329 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2330 u32 offset, u32 *val)
2335 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2338 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2339 EEPROM_ADDR_DEVID_MASK |
2341 tw32(GRC_EEPROM_ADDR,
2343 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2344 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2345 EEPROM_ADDR_ADDR_MASK) |
2346 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2348 for (i = 0; i < 1000; i++) {
2349 tmp = tr32(GRC_EEPROM_ADDR);
2351 if (tmp & EEPROM_ADDR_COMPLETE)
2355 if (!(tmp & EEPROM_ADDR_COMPLETE))
2358 tmp = tr32(GRC_EEPROM_DATA);
2361 * The data will always be opposite the native endian
2362 * format. Perform a blind byteswap to compensate.
2369 #define NVRAM_CMD_TIMEOUT 10000
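/* Issue a command to the NVRAM controller and poll for NVRAM_CMD_DONE,
 * giving up after NVRAM_CMD_TIMEOUT polls.
 */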
2371 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2375 tw32(NVRAM_CMD, nvram_cmd);
2376 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2378 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2384 if (i == NVRAM_CMD_TIMEOUT)
2390 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2392 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2393 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2394 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2395 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2396 (tp->nvram_jedecnum == JEDEC_ATMEL))
2398 addr = ((addr / tp->nvram_pagesize) <<
2399 ATMEL_AT45DB0X1B_PAGE_POS) +
2400 (addr % tp->nvram_pagesize);
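/* tg3_nvram_phys_addr() above converts a linear NVRAM offset into the
 * page-indexed form expected by Atmel AT45DB0x1B-style parts;
 * tg3_nvram_logical_addr() below performs the inverse translation.
 */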
2405 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2407 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
2408 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
2409 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
2410 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
2411 (tp->nvram_jedecnum == JEDEC_ATMEL))
2413 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2414 tp->nvram_pagesize) +
2415 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2420 /* NOTE: Data read in from NVRAM is byteswapped according to
2421 * the byteswapping settings for all other register accesses.
2422 * tg3 devices are BE devices, so on a BE machine, the data
2423 * returned will be exactly as it is seen in NVRAM. On a LE
2424 * machine, the 32-bit value will be byteswapped.
2426 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2430 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
2431 return tg3_nvram_read_using_eeprom(tp, offset, val);
2433 offset = tg3_nvram_phys_addr(tp, offset);
2435 if (offset > NVRAM_ADDR_MSK)
2438 ret = tg3_nvram_lock(tp);
2442 tg3_enable_nvram_access(tp);
2444 tw32(NVRAM_ADDR, offset);
2445 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2446 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2449 *val = tr32(NVRAM_RDDATA);
2451 tg3_disable_nvram_access(tp);
2453 tg3_nvram_unlock(tp);
2458 /* Ensures NVRAM data is in bytestream format. */
2459 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2462 int res = tg3_nvram_read(tp, offset, &v);
2464 *val = cpu_to_be32(v);
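/* Program the station address into the MAC's address slots (all four
 * copies, plus the twelve extended slots on 5703/5704) and seed the
 * transmit backoff generator from the sum of the address bytes.
 */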
2468 /* tp->lock is held. */
2469 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2471 u32 addr_high, addr_low;
2474 addr_high = ((tp->dev->dev_addr[0] << 8) |
2475 tp->dev->dev_addr[1]);
2476 addr_low = ((tp->dev->dev_addr[2] << 24) |
2477 (tp->dev->dev_addr[3] << 16) |
2478 (tp->dev->dev_addr[4] << 8) |
2479 (tp->dev->dev_addr[5] << 0));
2480 for (i = 0; i < 4; i++) {
2481 if (i == 1 && skip_mac_1)
2483 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2484 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2487 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2489 for (i = 0; i < 12; i++) {
2490 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2491 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2495 addr_high = (tp->dev->dev_addr[0] +
2496 tp->dev->dev_addr[1] +
2497 tp->dev->dev_addr[2] +
2498 tp->dev->dev_addr[3] +
2499 tp->dev->dev_addr[4] +
2500 tp->dev->dev_addr[5]) &
2501 TX_BACKOFF_SEED_MASK;
2502 tw32(MAC_TX_BACKOFF_SEED, addr_high);
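/* Transition the device between power states.  Returning to D0 restores
 * full power and GRC_LOCAL_CTRL; entering a low-power state reprograms
 * the PHY advertisement for the permitted wake speeds, sets up the MAC
 * for magic-packet wake, gates the various clocks, and finally asks the
 * PCI core to put the function into the requested state.
 */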
2505 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2508 bool device_should_wake, do_low_power;
2510 /* Make sure register accesses (indirect or otherwise)
2511 * will function correctly.
2513 pci_write_config_dword(tp->pdev,
2514 TG3PCI_MISC_HOST_CTRL,
2515 tp->misc_host_ctrl);
2519 pci_enable_wake(tp->pdev, state, false);
2520 pci_set_power_state(tp->pdev, PCI_D0);
2522 /* Switch out of Vaux if it is a NIC */
2523 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2524 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2534 netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
2539 /* Restore the CLKREQ setting. */
2540 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
2543 pci_read_config_word(tp->pdev,
2544 tp->pcie_cap + PCI_EXP_LNKCTL,
2546 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
2547 pci_write_config_word(tp->pdev,
2548 tp->pcie_cap + PCI_EXP_LNKCTL,
2552 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2553 tw32(TG3PCI_MISC_HOST_CTRL,
2554 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2556 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2557 device_may_wakeup(&tp->pdev->dev) &&
2558 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2560 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2561 do_low_power = false;
2562 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2563 !tp->link_config.phy_is_low_power) {
2564 struct phy_device *phydev;
2565 u32 phyid, advertising;
2567 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2569 tp->link_config.phy_is_low_power = 1;
2571 tp->link_config.orig_speed = phydev->speed;
2572 tp->link_config.orig_duplex = phydev->duplex;
2573 tp->link_config.orig_autoneg = phydev->autoneg;
2574 tp->link_config.orig_advertising = phydev->advertising;
2576 advertising = ADVERTISED_TP |
2578 ADVERTISED_Autoneg |
2579 ADVERTISED_10baseT_Half;
2581 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2582 device_should_wake) {
2583 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2585 ADVERTISED_100baseT_Half |
2586 ADVERTISED_100baseT_Full |
2587 ADVERTISED_10baseT_Full;
2589 advertising |= ADVERTISED_10baseT_Full;
2592 phydev->advertising = advertising;
2594 phy_start_aneg(phydev);
2596 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2597 if (phyid != PHY_ID_BCMAC131) {
2598 phyid &= PHY_BCM_OUI_MASK;
2599 if (phyid == PHY_BCM_OUI_1 ||
2600 phyid == PHY_BCM_OUI_2 ||
2601 phyid == PHY_BCM_OUI_3)
2602 do_low_power = true;
2606 do_low_power = true;
2608 if (tp->link_config.phy_is_low_power == 0) {
2609 tp->link_config.phy_is_low_power = 1;
2610 tp->link_config.orig_speed = tp->link_config.speed;
2611 tp->link_config.orig_duplex = tp->link_config.duplex;
2612 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2615 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2616 tp->link_config.speed = SPEED_10;
2617 tp->link_config.duplex = DUPLEX_HALF;
2618 tp->link_config.autoneg = AUTONEG_ENABLE;
2619 tg3_setup_phy(tp, 0);
2623 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2626 val = tr32(GRC_VCPU_EXT_CTRL);
2627 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2628 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2632 for (i = 0; i < 200; i++) {
2633 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2634 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2639 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2640 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2641 WOL_DRV_STATE_SHUTDOWN |
2645 if (device_should_wake) {
2648 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2650 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2654 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2655 mac_mode = MAC_MODE_PORT_MODE_GMII;
2657 mac_mode = MAC_MODE_PORT_MODE_MII;
2659 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2660 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2662 u32 speed = (tp->tg3_flags &
2663 TG3_FLAG_WOL_SPEED_100MB) ?
2664 SPEED_100 : SPEED_10;
2665 if (tg3_5700_link_polarity(tp, speed))
2666 mac_mode |= MAC_MODE_LINK_POLARITY;
2668 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2671 mac_mode = MAC_MODE_PORT_MODE_TBI;
2674 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2675 tw32(MAC_LED_CTRL, tp->led_ctrl);
2677 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2678 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2679 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2680 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2681 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2682 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2684 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2685 mac_mode |= tp->mac_mode &
2686 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2687 if (mac_mode & MAC_MODE_APE_TX_EN)
2688 mac_mode |= MAC_MODE_TDE_ENABLE;
2691 tw32_f(MAC_MODE, mac_mode);
2694 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2698 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2699 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2700 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2703 base_val = tp->pci_clock_ctrl;
2704 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2705 CLOCK_CTRL_TXCLK_DISABLE);
2707 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2708 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2709 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2710 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2711 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2713 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2714 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2715 u32 newbits1, newbits2;
2717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2718 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2719 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2720 CLOCK_CTRL_TXCLK_DISABLE |
2722 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2723 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2724 newbits1 = CLOCK_CTRL_625_CORE;
2725 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2727 newbits1 = CLOCK_CTRL_ALTCLK;
2728 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2731 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2734 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2737 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2741 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2742 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2743 CLOCK_CTRL_TXCLK_DISABLE |
2744 CLOCK_CTRL_44MHZ_CORE);
2746 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2749 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2750 tp->pci_clock_ctrl | newbits3, 40);
2754 if (!(device_should_wake) &&
2755 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
2756 tg3_power_down_phy(tp, do_low_power);
2758 tg3_frob_aux_power(tp);
2760 /* Workaround for unstable PLL clock */
2761 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2762 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
2763 u32 val = tr32(0x7d00);
2765 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2767 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2770 err = tg3_nvram_lock(tp);
2771 tg3_halt_cpu(tp, RX_CPU_BASE);
2773 tg3_nvram_unlock(tp);
2777 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2779 if (device_should_wake)
2780 pci_enable_wake(tp->pdev, state, true);
2782 /* Finally, set the new power state. */
2783 pci_set_power_state(tp->pdev, state);
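/* Decode the speed/duplex field of the MII_TG3_AUX_STAT register into
 * SPEED_* / DUPLEX_* values.  FET-style PHYs encode the result in
 * separate 100/FULL bits and are handled when none of the standard codes
 * match.
 */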
2788 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2790 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2791 case MII_TG3_AUX_STAT_10HALF:
2793 *duplex = DUPLEX_HALF;
2796 case MII_TG3_AUX_STAT_10FULL:
2798 *duplex = DUPLEX_FULL;
2801 case MII_TG3_AUX_STAT_100HALF:
2803 *duplex = DUPLEX_HALF;
2806 case MII_TG3_AUX_STAT_100FULL:
2808 *duplex = DUPLEX_FULL;
2811 case MII_TG3_AUX_STAT_1000HALF:
2812 *speed = SPEED_1000;
2813 *duplex = DUPLEX_HALF;
2816 case MII_TG3_AUX_STAT_1000FULL:
2817 *speed = SPEED_1000;
2818 *duplex = DUPLEX_FULL;
2822 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2823 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2825 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2829 *speed = SPEED_INVALID;
2830 *duplex = DUPLEX_INVALID;
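/* Program the copper PHY's advertisement registers according to
 * link_config and either restart autonegotiation or force the requested
 * fixed speed/duplex.
 */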
2835 static void tg3_phy_copper_begin(struct tg3 *tp)
2840 if (tp->link_config.phy_is_low_power) {
2841 /* Entering low power mode. Disable gigabit and
2842 * 100baseT advertisements.
2844 tg3_writephy(tp, MII_TG3_CTRL, 0);
2846 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2847 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
2848 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2849 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2851 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2852 } else if (tp->link_config.speed == SPEED_INVALID) {
2853 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2854 tp->link_config.advertising &=
2855 ~(ADVERTISED_1000baseT_Half |
2856 ADVERTISED_1000baseT_Full);
2858 new_adv = ADVERTISE_CSMA;
2859 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2860 new_adv |= ADVERTISE_10HALF;
2861 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2862 new_adv |= ADVERTISE_10FULL;
2863 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2864 new_adv |= ADVERTISE_100HALF;
2865 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2866 new_adv |= ADVERTISE_100FULL;
2868 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2870 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2872 if (tp->link_config.advertising &
2873 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2875 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2876 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2877 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2878 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
2879 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2880 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2881 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2882 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2883 MII_TG3_CTRL_ENABLE_AS_MASTER);
2884 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2886 tg3_writephy(tp, MII_TG3_CTRL, 0);
2889 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2890 new_adv |= ADVERTISE_CSMA;
2892 /* Asking for a specific link mode. */
2893 if (tp->link_config.speed == SPEED_1000) {
2894 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2896 if (tp->link_config.duplex == DUPLEX_FULL)
2897 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2899 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2900 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2901 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2902 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2903 MII_TG3_CTRL_ENABLE_AS_MASTER);
2905 if (tp->link_config.speed == SPEED_100) {
2906 if (tp->link_config.duplex == DUPLEX_FULL)
2907 new_adv |= ADVERTISE_100FULL;
2909 new_adv |= ADVERTISE_100HALF;
2911 if (tp->link_config.duplex == DUPLEX_FULL)
2912 new_adv |= ADVERTISE_10FULL;
2914 new_adv |= ADVERTISE_10HALF;
2916 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2921 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2924 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2925 tp->link_config.speed != SPEED_INVALID) {
2926 u32 bmcr, orig_bmcr;
2928 tp->link_config.active_speed = tp->link_config.speed;
2929 tp->link_config.active_duplex = tp->link_config.duplex;
2932 switch (tp->link_config.speed) {
2938 bmcr |= BMCR_SPEED100;
2942 bmcr |= TG3_BMCR_SPEED1000;
2946 if (tp->link_config.duplex == DUPLEX_FULL)
2947 bmcr |= BMCR_FULLDPLX;
2949 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2950 (bmcr != orig_bmcr)) {
2951 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2952 for (i = 0; i < 1500; i++) {
2956 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2957 tg3_readphy(tp, MII_BMSR, &tmp))
2959 if (!(tmp & BMSR_LSTATUS)) {
2964 tg3_writephy(tp, MII_BMCR, bmcr);
2968 tg3_writephy(tp, MII_BMCR,
2969 BMCR_ANENABLE | BMCR_ANRESTART);
2973 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2977 /* Turn off tap power management. */
2978 /* Set Extended packet length bit */
2979 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2981 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2982 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2984 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2985 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2987 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2988 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2990 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2991 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2993 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2994 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
3001 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3003 u32 adv_reg, all_mask = 0;
3005 if (mask & ADVERTISED_10baseT_Half)
3006 all_mask |= ADVERTISE_10HALF;
3007 if (mask & ADVERTISED_10baseT_Full)
3008 all_mask |= ADVERTISE_10FULL;
3009 if (mask & ADVERTISED_100baseT_Half)
3010 all_mask |= ADVERTISE_100HALF;
3011 if (mask & ADVERTISED_100baseT_Full)
3012 all_mask |= ADVERTISE_100FULL;
3014 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3017 if ((adv_reg & all_mask) != all_mask)
3019 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
3023 if (mask & ADVERTISED_1000baseT_Half)
3024 all_mask |= ADVERTISE_1000HALF;
3025 if (mask & ADVERTISED_1000baseT_Full)
3026 all_mask |= ADVERTISE_1000FULL;
3028 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3031 if ((tg3_ctrl & all_mask) != all_mask)
3037 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3041 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3044 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3045 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3047 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3048 if (curadv != reqadv)
3051 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
3052 tg3_readphy(tp, MII_LPA, rmtadv);
3054 /* Reprogram the advertisement register, even if it
3055 * does not affect the current link. If the link
3056 * gets renegotiated in the future, we can save an
3057 * additional renegotiation cycle by advertising
3058 * it correctly in the first place.
3060 if (curadv != reqadv) {
3061 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3062 ADVERTISE_PAUSE_ASYM);
3063 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
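/* Main copper link bring-up path: clear stale MAC status, apply the
 * 5401/5701 PHY workarounds, poll BMSR for link, resolve the negotiated
 * speed and duplex, and program MAC_MODE and the carrier state to match.
 */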
3070 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
3072 int current_link_up;
3074 u32 lcl_adv, rmt_adv;
3082 (MAC_STATUS_SYNC_CHANGED |
3083 MAC_STATUS_CFG_CHANGED |
3084 MAC_STATUS_MI_COMPLETION |
3085 MAC_STATUS_LNKSTATE_CHANGED));
3088 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
3090 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
3094 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
3096 /* Some third-party PHYs need to be reset on link going
3099 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3100 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
3101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
3102 netif_carrier_ok(tp->dev)) {
3103 tg3_readphy(tp, MII_BMSR, &bmsr);
3104 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3105 !(bmsr & BMSR_LSTATUS))
3111 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
3112 tg3_readphy(tp, MII_BMSR, &bmsr);
3113 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
3114 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
3117 if (!(bmsr & BMSR_LSTATUS)) {
3118 err = tg3_init_5401phy_dsp(tp);
3122 tg3_readphy(tp, MII_BMSR, &bmsr);
3123 for (i = 0; i < 1000; i++) {
3125 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3126 (bmsr & BMSR_LSTATUS)) {
3132 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
3133 TG3_PHY_REV_BCM5401_B0 &&
3134 !(bmsr & BMSR_LSTATUS) &&
3135 tp->link_config.active_speed == SPEED_1000) {
3136 err = tg3_phy_reset(tp);
3138 err = tg3_init_5401phy_dsp(tp);
3143 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3144 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
3145 /* 5701 {A0,B0} CRC bug workaround */
3146 tg3_writephy(tp, 0x15, 0x0a75);
3147 tg3_writephy(tp, 0x1c, 0x8c68);
3148 tg3_writephy(tp, 0x1c, 0x8d68);
3149 tg3_writephy(tp, 0x1c, 0x8c68);
3152 /* Clear pending interrupts... */
3153 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3154 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
3156 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
3157 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
3158 else if (!(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
3159 tg3_writephy(tp, MII_TG3_IMASK, ~0);
3161 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3162 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3163 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
3164 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3165 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
3167 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
3170 current_link_up = 0;
3171 current_speed = SPEED_INVALID;
3172 current_duplex = DUPLEX_INVALID;
3174 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
3177 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
3178 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
3179 if (!(val & (1 << 10))) {
3181 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
3187 for (i = 0; i < 100; i++) {
3188 tg3_readphy(tp, MII_BMSR, &bmsr);
3189 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
3190 (bmsr & BMSR_LSTATUS))
3195 if (bmsr & BMSR_LSTATUS) {
3198 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
3199 for (i = 0; i < 2000; i++) {
3201 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
3206 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
3211 for (i = 0; i < 200; i++) {
3212 tg3_readphy(tp, MII_BMCR, &bmcr);
3213 if (tg3_readphy(tp, MII_BMCR, &bmcr))
3215 if (bmcr && bmcr != 0x7fff)
3223 tp->link_config.active_speed = current_speed;
3224 tp->link_config.active_duplex = current_duplex;
3226 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3227 if ((bmcr & BMCR_ANENABLE) &&
3228 tg3_copper_is_advertising_all(tp,
3229 tp->link_config.advertising)) {
3230 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
3232 current_link_up = 1;
3235 if (!(bmcr & BMCR_ANENABLE) &&
3236 tp->link_config.speed == current_speed &&
3237 tp->link_config.duplex == current_duplex &&
3238 tp->link_config.flowctrl ==
3239 tp->link_config.active_flowctrl) {
3240 current_link_up = 1;
3244 if (current_link_up == 1 &&
3245 tp->link_config.active_duplex == DUPLEX_FULL)
3246 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
3250 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
3253 tg3_phy_copper_begin(tp);
3255 tg3_readphy(tp, MII_BMSR, &tmp);
3256 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
3257 (tmp & BMSR_LSTATUS))
3258 current_link_up = 1;
3261 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
3262 if (current_link_up == 1) {
3263 if (tp->link_config.active_speed == SPEED_100 ||
3264 tp->link_config.active_speed == SPEED_10)
3265 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3267 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3268 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)
3269 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
3271 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3273 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3274 if (tp->link_config.active_duplex == DUPLEX_HALF)
3275 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3277 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
3278 if (current_link_up == 1 &&
3279 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
3280 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
3282 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
3285 /* ??? Without this setting Netgear GA302T PHY does not
3286 * ??? send/receive packets...
3288 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
3289 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
3290 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
3291 tw32_f(MAC_MI_MODE, tp->mi_mode);
3295 tw32_f(MAC_MODE, tp->mac_mode);
3298 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
3299 /* Polled via timer. */
3300 tw32_f(MAC_EVENT, 0);
3302 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3306 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
3307 current_link_up == 1 &&
3308 tp->link_config.active_speed == SPEED_1000 &&
3309 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
3310 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
3313 (MAC_STATUS_SYNC_CHANGED |
3314 MAC_STATUS_CFG_CHANGED));
3317 NIC_SRAM_FIRMWARE_MBOX,
3318 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
3321 /* Prevent send BD corruption. */
3322 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
3323 u16 oldlnkctl, newlnkctl;
3325 pci_read_config_word(tp->pdev,
3326 tp->pcie_cap + PCI_EXP_LNKCTL,
3328 if (tp->link_config.active_speed == SPEED_100 ||
3329 tp->link_config.active_speed == SPEED_10)
3330 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
3332 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
3333 if (newlnkctl != oldlnkctl)
3334 pci_write_config_word(tp->pdev,
3335 tp->pcie_cap + PCI_EXP_LNKCTL,
3339 if (current_link_up != netif_carrier_ok(tp->dev)) {
3340 if (current_link_up)
3341 netif_carrier_on(tp->dev);
3343 netif_carrier_off(tp->dev);
3344 tg3_link_report(tp);
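/* Software autonegotiation state machine used for 1000BASE-X links when
 * the hardware autoneg engine is not in use.  The states and MR_* flags
 * below roughly follow the IEEE 802.3 clause 37 management variables.
 */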
3350 struct tg3_fiber_aneginfo {
3352 #define ANEG_STATE_UNKNOWN 0
3353 #define ANEG_STATE_AN_ENABLE 1
3354 #define ANEG_STATE_RESTART_INIT 2
3355 #define ANEG_STATE_RESTART 3
3356 #define ANEG_STATE_DISABLE_LINK_OK 4
3357 #define ANEG_STATE_ABILITY_DETECT_INIT 5
3358 #define ANEG_STATE_ABILITY_DETECT 6
3359 #define ANEG_STATE_ACK_DETECT_INIT 7
3360 #define ANEG_STATE_ACK_DETECT 8
3361 #define ANEG_STATE_COMPLETE_ACK_INIT 9
3362 #define ANEG_STATE_COMPLETE_ACK 10
3363 #define ANEG_STATE_IDLE_DETECT_INIT 11
3364 #define ANEG_STATE_IDLE_DETECT 12
3365 #define ANEG_STATE_LINK_OK 13
3366 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
3367 #define ANEG_STATE_NEXT_PAGE_WAIT 15
3370 #define MR_AN_ENABLE 0x00000001
3371 #define MR_RESTART_AN 0x00000002
3372 #define MR_AN_COMPLETE 0x00000004
3373 #define MR_PAGE_RX 0x00000008
3374 #define MR_NP_LOADED 0x00000010
3375 #define MR_TOGGLE_TX 0x00000020
3376 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
3377 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
3378 #define MR_LP_ADV_SYM_PAUSE 0x00000100
3379 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
3380 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
3381 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
3382 #define MR_LP_ADV_NEXT_PAGE 0x00001000
3383 #define MR_TOGGLE_RX 0x00002000
3384 #define MR_NP_RX 0x00004000
3386 #define MR_LINK_OK 0x80000000
3388 unsigned long link_time, cur_time;
3390 u32 ability_match_cfg;
3391 int ability_match_count;
3393 char ability_match, idle_match, ack_match;
3395 u32 txconfig, rxconfig;
3396 #define ANEG_CFG_NP 0x00000080
3397 #define ANEG_CFG_ACK 0x00000040
3398 #define ANEG_CFG_RF2 0x00000020
3399 #define ANEG_CFG_RF1 0x00000010
3400 #define ANEG_CFG_PS2 0x00000001
3401 #define ANEG_CFG_PS1 0x00008000
3402 #define ANEG_CFG_HD 0x00004000
3403 #define ANEG_CFG_FD 0x00002000
3404 #define ANEG_CFG_INVAL 0x00001f06
3409 #define ANEG_TIMER_ENAB 2
3410 #define ANEG_FAILED -1
3412 #define ANEG_STATE_SETTLE_TIME 10000
3414 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3415 struct tg3_fiber_aneginfo *ap)
3418 unsigned long delta;
3422 if (ap->state == ANEG_STATE_UNKNOWN) {
3426 ap->ability_match_cfg = 0;
3427 ap->ability_match_count = 0;
3428 ap->ability_match = 0;
3434 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3435 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3437 if (rx_cfg_reg != ap->ability_match_cfg) {
3438 ap->ability_match_cfg = rx_cfg_reg;
3439 ap->ability_match = 0;
3440 ap->ability_match_count = 0;
3442 if (++ap->ability_match_count > 1) {
3443 ap->ability_match = 1;
3444 ap->ability_match_cfg = rx_cfg_reg;
3447 if (rx_cfg_reg & ANEG_CFG_ACK)
3455 ap->ability_match_cfg = 0;
3456 ap->ability_match_count = 0;
3457 ap->ability_match = 0;
3463 ap->rxconfig = rx_cfg_reg;
3466 switch (ap->state) {
3467 case ANEG_STATE_UNKNOWN:
3468 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3469 ap->state = ANEG_STATE_AN_ENABLE;
3472 case ANEG_STATE_AN_ENABLE:
3473 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3474 if (ap->flags & MR_AN_ENABLE) {
3477 ap->ability_match_cfg = 0;
3478 ap->ability_match_count = 0;
3479 ap->ability_match = 0;
3483 ap->state = ANEG_STATE_RESTART_INIT;
3485 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3489 case ANEG_STATE_RESTART_INIT:
3490 ap->link_time = ap->cur_time;
3491 ap->flags &= ~(MR_NP_LOADED);
3493 tw32(MAC_TX_AUTO_NEG, 0);
3494 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3495 tw32_f(MAC_MODE, tp->mac_mode);
3498 ret = ANEG_TIMER_ENAB;
3499 ap->state = ANEG_STATE_RESTART;
3502 case ANEG_STATE_RESTART:
3503 delta = ap->cur_time - ap->link_time;
3504 if (delta > ANEG_STATE_SETTLE_TIME)
3505 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3507 ret = ANEG_TIMER_ENAB;
3510 case ANEG_STATE_DISABLE_LINK_OK:
3514 case ANEG_STATE_ABILITY_DETECT_INIT:
3515 ap->flags &= ~(MR_TOGGLE_TX);
3516 ap->txconfig = ANEG_CFG_FD;
3517 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3518 if (flowctrl & ADVERTISE_1000XPAUSE)
3519 ap->txconfig |= ANEG_CFG_PS1;
3520 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3521 ap->txconfig |= ANEG_CFG_PS2;
3522 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3523 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3524 tw32_f(MAC_MODE, tp->mac_mode);
3527 ap->state = ANEG_STATE_ABILITY_DETECT;
3530 case ANEG_STATE_ABILITY_DETECT:
3531 if (ap->ability_match != 0 && ap->rxconfig != 0)
3532 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3535 case ANEG_STATE_ACK_DETECT_INIT:
3536 ap->txconfig |= ANEG_CFG_ACK;
3537 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3538 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3539 tw32_f(MAC_MODE, tp->mac_mode);
3542 ap->state = ANEG_STATE_ACK_DETECT;
3545 case ANEG_STATE_ACK_DETECT:
3546 if (ap->ack_match != 0) {
3547 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3548 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3549 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3551 ap->state = ANEG_STATE_AN_ENABLE;
3553 } else if (ap->ability_match != 0 &&
3554 ap->rxconfig == 0) {
3555 ap->state = ANEG_STATE_AN_ENABLE;
3559 case ANEG_STATE_COMPLETE_ACK_INIT:
3560 if (ap->rxconfig & ANEG_CFG_INVAL) {
3564 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3565 MR_LP_ADV_HALF_DUPLEX |
3566 MR_LP_ADV_SYM_PAUSE |
3567 MR_LP_ADV_ASYM_PAUSE |
3568 MR_LP_ADV_REMOTE_FAULT1 |
3569 MR_LP_ADV_REMOTE_FAULT2 |
3570 MR_LP_ADV_NEXT_PAGE |
3573 if (ap->rxconfig & ANEG_CFG_FD)
3574 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3575 if (ap->rxconfig & ANEG_CFG_HD)
3576 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3577 if (ap->rxconfig & ANEG_CFG_PS1)
3578 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3579 if (ap->rxconfig & ANEG_CFG_PS2)
3580 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3581 if (ap->rxconfig & ANEG_CFG_RF1)
3582 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3583 if (ap->rxconfig & ANEG_CFG_RF2)
3584 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3585 if (ap->rxconfig & ANEG_CFG_NP)
3586 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3588 ap->link_time = ap->cur_time;
3590 ap->flags ^= (MR_TOGGLE_TX);
3591 if (ap->rxconfig & 0x0008)
3592 ap->flags |= MR_TOGGLE_RX;
3593 if (ap->rxconfig & ANEG_CFG_NP)
3594 ap->flags |= MR_NP_RX;
3595 ap->flags |= MR_PAGE_RX;
3597 ap->state = ANEG_STATE_COMPLETE_ACK;
3598 ret = ANEG_TIMER_ENAB;
3601 case ANEG_STATE_COMPLETE_ACK:
3602 if (ap->ability_match != 0 &&
3603 ap->rxconfig == 0) {
3604 ap->state = ANEG_STATE_AN_ENABLE;
3607 delta = ap->cur_time - ap->link_time;
3608 if (delta > ANEG_STATE_SETTLE_TIME) {
3609 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3610 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3612 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3613 !(ap->flags & MR_NP_RX)) {
3614 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3622 case ANEG_STATE_IDLE_DETECT_INIT:
3623 ap->link_time = ap->cur_time;
3624 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3625 tw32_f(MAC_MODE, tp->mac_mode);
3628 ap->state = ANEG_STATE_IDLE_DETECT;
3629 ret = ANEG_TIMER_ENAB;
3632 case ANEG_STATE_IDLE_DETECT:
3633 if (ap->ability_match != 0 &&
3634 ap->rxconfig == 0) {
3635 ap->state = ANEG_STATE_AN_ENABLE;
3638 delta = ap->cur_time - ap->link_time;
3639 if (delta > ANEG_STATE_SETTLE_TIME) {
3640 /* XXX another gem from the Broadcom driver :( */
3641 ap->state = ANEG_STATE_LINK_OK;
3645 case ANEG_STATE_LINK_OK:
3646 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3650 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3651 /* ??? unimplemented */
3654 case ANEG_STATE_NEXT_PAGE_WAIT:
3655 /* ??? unimplemented */
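/* Run the state machine above to completion (bounded by a fixed number
 * of delay ticks) and report the transmitted and received config words
 * through *txflags and *rxflags.
 */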
3666 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3669 struct tg3_fiber_aneginfo aninfo;
3670 int status = ANEG_FAILED;
3674 tw32_f(MAC_TX_AUTO_NEG, 0);
3676 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3677 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3680 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3683 memset(&aninfo, 0, sizeof(aninfo));
3684 aninfo.flags |= MR_AN_ENABLE;
3685 aninfo.state = ANEG_STATE_UNKNOWN;
3686 aninfo.cur_time = 0;
3688 while (++tick < 195000) {
3689 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3690 if (status == ANEG_DONE || status == ANEG_FAILED)
3696 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3697 tw32_f(MAC_MODE, tp->mac_mode);
3700 *txflags = aninfo.txconfig;
3701 *rxflags = aninfo.flags;
3703 if (status == ANEG_DONE &&
3704 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3705 MR_LP_ADV_FULL_DUPLEX)))
3711 static void tg3_init_bcm8002(struct tg3 *tp)
3713 u32 mac_status = tr32(MAC_STATUS);
3716 /* Reset when initializing for the first time or when we have a link. */
3717 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3718 !(mac_status & MAC_STATUS_PCS_SYNCED))
3721 /* Set PLL lock range. */
3722 tg3_writephy(tp, 0x16, 0x8007);
3725 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3727 /* Wait for reset to complete. */
3728 /* XXX schedule_timeout() ... */
3729 for (i = 0; i < 500; i++)
3732 /* Config mode; select PMA/Ch 1 regs. */
3733 tg3_writephy(tp, 0x10, 0x8411);
3735 /* Enable auto-lock and comdet, select txclk for tx. */
3736 tg3_writephy(tp, 0x11, 0x0a10);
3738 tg3_writephy(tp, 0x18, 0x00a0);
3739 tg3_writephy(tp, 0x16, 0x41ff);
3741 /* Assert and deassert POR. */
3742 tg3_writephy(tp, 0x13, 0x0400);
3744 tg3_writephy(tp, 0x13, 0x0000);
3746 tg3_writephy(tp, 0x11, 0x0a50);
3748 tg3_writephy(tp, 0x11, 0x0a10);
3750 /* Wait for signal to stabilize */
3751 /* XXX schedule_timeout() ... */
3752 for (i = 0; i < 15000; i++)
3755 /* Deselect the channel register so we can read the PHYID
3758 tg3_writephy(tp, 0x10, 0x8011);
3761 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3764 u32 sg_dig_ctrl, sg_dig_status;
3765 u32 serdes_cfg, expected_sg_dig_ctrl;
3766 int workaround, port_a;
3767 int current_link_up;
3770 expected_sg_dig_ctrl = 0;
3773 current_link_up = 0;
3775 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3776 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3778 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3781 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3782 /* preserve bits 20-23 for voltage regulator */
3783 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3786 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3788 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3789 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3791 u32 val = serdes_cfg;
3797 tw32_f(MAC_SERDES_CFG, val);
3800 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3802 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3803 tg3_setup_flow_control(tp, 0, 0);
3804 current_link_up = 1;
3809 /* Want auto-negotiation. */
3810 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3812 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3813 if (flowctrl & ADVERTISE_1000XPAUSE)
3814 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3815 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3816 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3818 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3819 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3820 tp->serdes_counter &&
3821 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3822 MAC_STATUS_RCVD_CFG)) ==
3823 MAC_STATUS_PCS_SYNCED)) {
3824 tp->serdes_counter--;
3825 current_link_up = 1;
3830 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3831 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3833 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3835 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3836 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3837 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3838 MAC_STATUS_SIGNAL_DET)) {
3839 sg_dig_status = tr32(SG_DIG_STATUS);
3840 mac_status = tr32(MAC_STATUS);
3842 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3843 (mac_status & MAC_STATUS_PCS_SYNCED)) {
3844 u32 local_adv = 0, remote_adv = 0;
3846 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3847 local_adv |= ADVERTISE_1000XPAUSE;
3848 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3849 local_adv |= ADVERTISE_1000XPSE_ASYM;
3851 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3852 remote_adv |= LPA_1000XPAUSE;
3853 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3854 remote_adv |= LPA_1000XPAUSE_ASYM;
3856 tg3_setup_flow_control(tp, local_adv, remote_adv);
3857 current_link_up = 1;
3858 tp->serdes_counter = 0;
3859 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3860 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3861 if (tp->serdes_counter)
3862 tp->serdes_counter--;
3865 u32 val = serdes_cfg;
3872 tw32_f(MAC_SERDES_CFG, val);
3875 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3878 /* Link parallel detection - link is up
3879  * only if we have PCS_SYNC and are not
3880  * receiving config code words. */
3881 mac_status = tr32(MAC_STATUS);
3882 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3883 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3884 tg3_setup_flow_control(tp, 0, 0);
3885 current_link_up = 1;
3887 TG3_FLG2_PARALLEL_DETECT;
3888 tp->serdes_counter =
3889 SERDES_PARALLEL_DET_TIMEOUT;
3891 goto restart_autoneg;
3895 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3896 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3900 return current_link_up;
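/* Fiber link setup for chips without the SG_DIG hardware autoneg block.
 * With autoneg enabled this runs the software state machine and falls
 * back to declaring link when PCS sync is seen without config words;
 * with autoneg disabled it simply forces a 1000FD link.
 */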
3903 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3905 int current_link_up = 0;
3907 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3910 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3911 u32 txflags, rxflags;
3914 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3915 u32 local_adv = 0, remote_adv = 0;
3917 if (txflags & ANEG_CFG_PS1)
3918 local_adv |= ADVERTISE_1000XPAUSE;
3919 if (txflags & ANEG_CFG_PS2)
3920 local_adv |= ADVERTISE_1000XPSE_ASYM;
3922 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3923 remote_adv |= LPA_1000XPAUSE;
3924 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3925 remote_adv |= LPA_1000XPAUSE_ASYM;
3927 tg3_setup_flow_control(tp, local_adv, remote_adv);
3929 current_link_up = 1;
3931 for (i = 0; i < 30; i++) {
3934 (MAC_STATUS_SYNC_CHANGED |
3935 MAC_STATUS_CFG_CHANGED));
3937 if ((tr32(MAC_STATUS) &
3938 (MAC_STATUS_SYNC_CHANGED |
3939 MAC_STATUS_CFG_CHANGED)) == 0)
3943 mac_status = tr32(MAC_STATUS);
3944 if (current_link_up == 0 &&
3945 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3946 !(mac_status & MAC_STATUS_RCVD_CFG))
3947 current_link_up = 1;
3949 tg3_setup_flow_control(tp, 0, 0);
3951 /* Forcing 1000FD link up. */
3952 current_link_up = 1;
3954 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3957 tw32_f(MAC_MODE, tp->mac_mode);
3962 return current_link_up;
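/* Top-level fiber (TBI) link setup: initialize the BCM8002 if present,
 * run either the hardware or the hand-rolled autoneg path, then update
 * the LED override and carrier state, reporting the link if anything
 * changed.
 */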
3965 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3968 u16 orig_active_speed;
3969 u8 orig_active_duplex;
3971 int current_link_up;
3974 orig_pause_cfg = tp->link_config.active_flowctrl;
3975 orig_active_speed = tp->link_config.active_speed;
3976 orig_active_duplex = tp->link_config.active_duplex;
3978 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3979 netif_carrier_ok(tp->dev) &&
3980 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3981 mac_status = tr32(MAC_STATUS);
3982 mac_status &= (MAC_STATUS_PCS_SYNCED |
3983 MAC_STATUS_SIGNAL_DET |
3984 MAC_STATUS_CFG_CHANGED |
3985 MAC_STATUS_RCVD_CFG);
3986 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3987 MAC_STATUS_SIGNAL_DET)) {
3988 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3989 MAC_STATUS_CFG_CHANGED));
3994 tw32_f(MAC_TX_AUTO_NEG, 0);
3996 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3997 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3998 tw32_f(MAC_MODE, tp->mac_mode);
4001 if (tp->phy_id == TG3_PHY_ID_BCM8002)
4002 tg3_init_bcm8002(tp);
4004 /* Enable link change events even when the serdes is being polled. */
4005 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4008 current_link_up = 0;
4009 mac_status = tr32(MAC_STATUS);
4011 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
4012 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
4014 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
4016 tp->napi[0].hw_status->status =
4017 (SD_STATUS_UPDATED |
4018 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
4020 for (i = 0; i < 100; i++) {
4021 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4022 MAC_STATUS_CFG_CHANGED));
4024 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
4025 MAC_STATUS_CFG_CHANGED |
4026 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
4030 mac_status = tr32(MAC_STATUS);
4031 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
4032 current_link_up = 0;
4033 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
4034 tp->serdes_counter == 0) {
4035 tw32_f(MAC_MODE, (tp->mac_mode |
4036 MAC_MODE_SEND_CONFIGS));
4038 tw32_f(MAC_MODE, tp->mac_mode);
4042 if (current_link_up == 1) {
4043 tp->link_config.active_speed = SPEED_1000;
4044 tp->link_config.active_duplex = DUPLEX_FULL;
4045 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4046 LED_CTRL_LNKLED_OVERRIDE |
4047 LED_CTRL_1000MBPS_ON));
4049 tp->link_config.active_speed = SPEED_INVALID;
4050 tp->link_config.active_duplex = DUPLEX_INVALID;
4051 tw32(MAC_LED_CTRL, (tp->led_ctrl |
4052 LED_CTRL_LNKLED_OVERRIDE |
4053 LED_CTRL_TRAFFIC_OVERRIDE));
4056 if (current_link_up != netif_carrier_ok(tp->dev)) {
4057 if (current_link_up)
4058 netif_carrier_on(tp->dev);
4060 netif_carrier_off(tp->dev);
4061 tg3_link_report(tp);
4063 u32 now_pause_cfg = tp->link_config.active_flowctrl;
4064 if (orig_pause_cfg != now_pause_cfg ||
4065 orig_active_speed != tp->link_config.active_speed ||
4066 orig_active_duplex != tp->link_config.active_duplex)
4067 tg3_link_report(tp);
4073 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4075 int current_link_up, err = 0;
4079 u32 local_adv, remote_adv;
4081 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4082 tw32_f(MAC_MODE, tp->mac_mode);
4088 (MAC_STATUS_SYNC_CHANGED |
4089 MAC_STATUS_CFG_CHANGED |
4090 MAC_STATUS_MI_COMPLETION |
4091 MAC_STATUS_LNKSTATE_CHANGED));
4097 current_link_up = 0;
4098 current_speed = SPEED_INVALID;
4099 current_duplex = DUPLEX_INVALID;
4101 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4102 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4103 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
4104 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4105 bmsr |= BMSR_LSTATUS;
4107 bmsr &= ~BMSR_LSTATUS;
4110 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
4112 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
4113 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4114 /* do nothing, just check for link up at the end */
4115 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4118 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4119 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
4120 ADVERTISE_1000XPAUSE |
4121 ADVERTISE_1000XPSE_ASYM |
4124 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4126 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
4127 new_adv |= ADVERTISE_1000XHALF;
4128 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
4129 new_adv |= ADVERTISE_1000XFULL;
4131 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
4132 tg3_writephy(tp, MII_ADVERTISE, new_adv);
4133 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
4134 tg3_writephy(tp, MII_BMCR, bmcr);
4136 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4137 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
4138 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4145 bmcr &= ~BMCR_SPEED1000;
4146 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
4148 if (tp->link_config.duplex == DUPLEX_FULL)
4149 new_bmcr |= BMCR_FULLDPLX;
4151 if (new_bmcr != bmcr) {
4152 /* BMCR_SPEED1000 is a reserved bit that needs
4153 * to be set on write.
4155 new_bmcr |= BMCR_SPEED1000;
4157 /* Force a linkdown */
4158 if (netif_carrier_ok(tp->dev)) {
4161 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
4162 adv &= ~(ADVERTISE_1000XFULL |
4163 ADVERTISE_1000XHALF |
4165 tg3_writephy(tp, MII_ADVERTISE, adv);
4166 tg3_writephy(tp, MII_BMCR, bmcr |
4170 netif_carrier_off(tp->dev);
4172 tg3_writephy(tp, MII_BMCR, new_bmcr);
4174 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4175 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
4176 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
4178 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
4179 bmsr |= BMSR_LSTATUS;
4181 bmsr &= ~BMSR_LSTATUS;
4183 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4187 if (bmsr & BMSR_LSTATUS) {
4188 current_speed = SPEED_1000;
4189 current_link_up = 1;
4190 if (bmcr & BMCR_FULLDPLX)
4191 current_duplex = DUPLEX_FULL;
4193 current_duplex = DUPLEX_HALF;
4198 if (bmcr & BMCR_ANENABLE) {
4201 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
4202 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
4203 common = local_adv & remote_adv;
4204 if (common & (ADVERTISE_1000XHALF |
4205 ADVERTISE_1000XFULL)) {
4206 if (common & ADVERTISE_1000XFULL)
4207 current_duplex = DUPLEX_FULL;
4209 current_duplex = DUPLEX_HALF;
4210 } else if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
4211 /* Link is up via parallel detect */
4213 current_link_up = 0;
4218 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
4219 tg3_setup_flow_control(tp, local_adv, remote_adv);
4221 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4222 if (tp->link_config.active_duplex == DUPLEX_HALF)
4223 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4225 tw32_f(MAC_MODE, tp->mac_mode);
4228 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4230 tp->link_config.active_speed = current_speed;
4231 tp->link_config.active_duplex = current_duplex;
4233 if (current_link_up != netif_carrier_ok(tp->dev)) {
4234 if (current_link_up)
4235 netif_carrier_on(tp->dev);
4237 netif_carrier_off(tp->dev);
4238 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
4240 tg3_link_report(tp);
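/* For MII-SerDes links: once autoneg has had time to complete, detect a
 * non-negotiating link partner via the PHY's signal-detect status and
 * force 1000FD, then re-enable autoneg as soon as config code words are
 * seen again.
 */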
4245 static void tg3_serdes_parallel_detect(struct tg3 *tp)
4247 if (tp->serdes_counter) {
4248 /* Give autoneg time to complete. */
4249 tp->serdes_counter--;
4253 if (!netif_carrier_ok(tp->dev) &&
4254 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4257 tg3_readphy(tp, MII_BMCR, &bmcr);
4258 if (bmcr & BMCR_ANENABLE) {
4261 /* Select shadow register 0x1f */
4262 tg3_writephy(tp, 0x1c, 0x7c00);
4263 tg3_readphy(tp, 0x1c, &phy1);
4265 /* Select expansion interrupt status register */
4266 tg3_writephy(tp, 0x17, 0x0f01);
4267 tg3_readphy(tp, 0x15, &phy2);
4268 tg3_readphy(tp, 0x15, &phy2);
4270 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
4271 /* We have signal detect and not receiving
4272 * config code words, link is up by parallel
4276 bmcr &= ~BMCR_ANENABLE;
4277 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4278 tg3_writephy(tp, MII_BMCR, bmcr);
4279 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4282 } else if (netif_carrier_ok(tp->dev) &&
4283 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4284 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4287 /* Select expansion interrupt status register */
4288 tg3_writephy(tp, 0x17, 0x0f01);
4289 tg3_readphy(tp, 0x15, &phy2);
4293 /* Config code words received, turn on autoneg. */
4294 tg3_readphy(tp, MII_BMCR, &bmcr);
4295 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
4297 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
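/* Dispatch link setup to the fiber, MII-SerDes, or copper path and then
 * apply the follow-up tweaks that depend on the result: the 5784 AX
 * clock prescaler, MAC_TX_LENGTHS for half-duplex gigabit, statistics
 * coalescing, and the ASPM L1 threshold workaround.
 */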
4303 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4307 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4308 err = tg3_setup_fiber_phy(tp, force_reset);
4309 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4310 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4312 err = tg3_setup_copper_phy(tp, force_reset);
4314 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4317 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
4318 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
4320 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
4325 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
4326 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
4327 tw32(GRC_MISC_CFG, val);
4330 if (tp->link_config.active_speed == SPEED_1000 &&
4331 tp->link_config.active_duplex == DUPLEX_HALF)
4332 tw32(MAC_TX_LENGTHS,
4333 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4334 (6 << TX_LENGTHS_IPG_SHIFT) |
4335 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
4337 tw32(MAC_TX_LENGTHS,
4338 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4339 (6 << TX_LENGTHS_IPG_SHIFT) |
4340 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
4342 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4343 if (netif_carrier_ok(tp->dev)) {
4344 tw32(HOSTCC_STAT_COAL_TICKS,
4345 tp->coal.stats_block_coalesce_usecs);
4347 tw32(HOSTCC_STAT_COAL_TICKS, 0);
4351 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
4352 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
4353 if (!netif_carrier_ok(tp->dev))
4354 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
4357 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
4358 tw32(PCIE_PWR_MGMT_THRESH, val);
4364 /* This is called whenever we suspect that the system chipset is re-
4365 * ordering the sequence of MMIO to the tx send mailbox. The symptom
4366 * is bogus tx completions. We try to recover by setting the
4367 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
4370 static void tg3_tx_recover(struct tg3 *tp)
4372 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4373 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4375 netdev_warn(tp->dev,
4376 "The system may be re-ordering memory-mapped I/O "
4377 "cycles to the network device, attempting to recover. "
4378 "Please report the problem to the driver maintainer "
4379 "and include system chipset information.\n");
4381 spin_lock(&tp->lock);
4382 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
4383 spin_unlock(&tp->lock);
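/* Number of transmit descriptors still available to the driver: the
 * configured tx_pending budget minus the descriptors currently in flight
 * between producer and consumer.
 */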
4386 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4389 return tnapi->tx_pending -
4390 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4393 /* Tigon3 never reports partial packet sends. So we do not
4394 * need special logic to handle SKBs that have not had all
4395 * of their frags sent yet, like SunGEM does.
4397 static void tg3_tx(struct tg3_napi *tnapi)
4399 struct tg3 *tp = tnapi->tp;
4400 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
4401 u32 sw_idx = tnapi->tx_cons;
4402 struct netdev_queue *txq;
4403 int index = tnapi - tp->napi;
4405 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
4408 txq = netdev_get_tx_queue(tp->dev, index);
4410 while (sw_idx != hw_idx) {
4411 struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
4412 struct sk_buff *skb = ri->skb;
4415 if (unlikely(skb == NULL)) {
4420 pci_unmap_single(tp->pdev,
4421 dma_unmap_addr(ri, mapping),
4427 sw_idx = NEXT_TX(sw_idx);
4429 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4430 ri = &tnapi->tx_buffers[sw_idx];
4431 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4434 pci_unmap_page(tp->pdev,
4435 dma_unmap_addr(ri, mapping),
4436 skb_shinfo(skb)->frags[i].size,
4438 sw_idx = NEXT_TX(sw_idx);
4443 if (unlikely(tx_bug)) {
4449 tnapi->tx_cons = sw_idx;
4451 /* Need to make the tx_cons update visible to tg3_start_xmit()
4452 * before checking for netif_queue_stopped(). Without the
4453 * memory barrier, there is a small possibility that tg3_start_xmit()
4454 * will miss it and cause the queue to be stopped forever.
4458 if (unlikely(netif_tx_queue_stopped(txq) &&
4459 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
4460 __netif_tx_lock(txq, smp_processor_id());
4461 if (netif_tx_queue_stopped(txq) &&
4462 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
4463 netif_tx_wake_queue(txq);
4464 __netif_tx_unlock(txq);
4468 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4473 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4474 map_sz, PCI_DMA_FROMDEVICE);
4475 dev_kfree_skb_any(ri->skb);
4479 /* Returns size of skb allocated or < 0 on error.
4481 * We only need to fill in the address because the other members
4482 * of the RX descriptor are invariant; see tg3_init_rings.
4484 * Note the purposeful asymmetry of cpu vs. chip accesses. For
4485 * posting buffers we only dirty the first cache line of the RX
4486 * descriptor (containing the address). Whereas for the RX status
4487 * buffers the cpu only reads the last cacheline of the RX descriptor
4488 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4490 static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4491 u32 opaque_key, u32 dest_idx_unmasked)
4493 struct tg3_rx_buffer_desc *desc;
4494 struct ring_info *map, *src_map;
4495 struct sk_buff *skb;
4497 int skb_size, dest_idx;
4500 switch (opaque_key) {
4501 case RXD_OPAQUE_RING_STD:
4502 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4503 desc = &tpr->rx_std[dest_idx];
4504 map = &tpr->rx_std_buffers[dest_idx];
4505 skb_size = tp->rx_pkt_map_sz;
4508 case RXD_OPAQUE_RING_JUMBO:
4509 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4510 desc = &tpr->rx_jmb[dest_idx].std;
4511 map = &tpr->rx_jmb_buffers[dest_idx];
4512 skb_size = TG3_RX_JMB_MAP_SZ;
4519 /* Do not overwrite any of the map or rp information
4520 * until we are sure we can commit to a new buffer.
4522 * Callers depend upon this behavior and assume that
4523 * we leave everything unchanged if we fail.
4525 skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
4529 skb_reserve(skb, tp->rx_offset);
4531 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4532 PCI_DMA_FROMDEVICE);
4533 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4539 dma_unmap_addr_set(map, mapping, mapping);
4541 desc->addr_hi = ((u64)mapping >> 32);
4542 desc->addr_lo = ((u64)mapping & 0xffffffff);
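/* Illustrative: a (hypothetical) 64-bit DMA address of 0x0000000123456789
 * is posted as addr_hi == 0x00000001 and addr_lo == 0x23456789; for 32-bit
 * mappings addr_hi simply ends up zero.
 */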
4547 /* We only need to move over the address because the other
4548 * members of the RX descriptor are invariant. See notes above
4549 * tg3_alloc_rx_skb for full details.
4551 static void tg3_recycle_rx(struct tg3_napi *tnapi,
4552 struct tg3_rx_prodring_set *dpr,
4553 u32 opaque_key, int src_idx,
4554 u32 dest_idx_unmasked)
4556 struct tg3 *tp = tnapi->tp;
4557 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4558 struct ring_info *src_map, *dest_map;
4559 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4562 switch (opaque_key) {
4563 case RXD_OPAQUE_RING_STD:
4564 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4565 dest_desc = &dpr->rx_std[dest_idx];
4566 dest_map = &dpr->rx_std_buffers[dest_idx];
4567 src_desc = &spr->rx_std[src_idx];
4568 src_map = &spr->rx_std_buffers[src_idx];
4571 case RXD_OPAQUE_RING_JUMBO:
4572 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4573 dest_desc = &dpr->rx_jmb[dest_idx].std;
4574 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4575 src_desc = &spr->rx_jmb[src_idx].std;
4576 src_map = &spr->rx_jmb_buffers[src_idx];
4583 dest_map->skb = src_map->skb;
4584 dma_unmap_addr_set(dest_map, mapping,
4585 dma_unmap_addr(src_map, mapping));
4586 dest_desc->addr_hi = src_desc->addr_hi;
4587 dest_desc->addr_lo = src_desc->addr_lo;
4589 /* Ensure that the update to the skb happens after the physical
4590 * addresses have been transferred to the new BD location.
4594 src_map->skb = NULL;
4597 /* The RX ring scheme is composed of multiple rings which post fresh
4598 * buffers to the chip, and one special ring the chip uses to report
4599 * status back to the host.
4601 * The special ring reports the status of received packets to the
4602 * host. The chip does not write into the original descriptor the
4603 * RX buffer was obtained from. The chip simply takes the original
4604 * descriptor as provided by the host, updates the status and length
4605 * field, then writes this into the next status ring entry.
4607 * Each ring the host uses to post buffers to the chip is described
4608 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
4609 * it is first placed into the on-chip RAM. When the packet's length
4610 * is known, the chip walks down the TG3_BDINFO entries to select the ring.
4611 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4612 * which is within the range of the new packet's length is chosen.
4614 * The "separate ring for rx status" scheme may sound odd, but it makes
4615 * sense from a cache coherency perspective. If only the host writes
4616 * to the buffer post rings, and only the chip writes to the rx status
4617 * rings, then cache lines never move beyond shared-modified state.
4618 * If both the host and chip were to write into the same ring, cache line
4619 * eviction could occur since both entities want it in an exclusive state.
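 * For example, a status ring entry whose opaque cookie decodes to ring
 * RXD_OPAQUE_RING_STD and index 42 tells tg3_rx() below that the packet
 * landed in the buffer the host had posted at slot 42 of the standard
 * producer ring.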
4621 static int tg3_rx(struct tg3_napi *tnapi, int budget)
4623 struct tg3 *tp = tnapi->tp;
4624 u32 work_mask, rx_std_posted = 0;
4625 u32 std_prod_idx, jmb_prod_idx;
4626 u32 sw_idx = tnapi->rx_rcb_ptr;
4629 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4631 hw_idx = *(tnapi->rx_rcb_prod_idx);
4633 * We need to order the read of hw_idx and the read of
4634 * the opaque cookie.
4639 std_prod_idx = tpr->rx_std_prod_idx;
4640 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4641 while (sw_idx != hw_idx && budget > 0) {
4642 struct ring_info *ri;
4643 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4645 struct sk_buff *skb;
4646 dma_addr_t dma_addr;
4647 u32 opaque_key, desc_idx, *post_ptr;
4648 bool hw_vlan __maybe_unused = false;
4649 u16 vtag __maybe_unused = 0;
4651 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4652 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4653 if (opaque_key == RXD_OPAQUE_RING_STD) {
4654 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4655 dma_addr = dma_unmap_addr(ri, mapping);
4657 post_ptr = &std_prod_idx;
4659 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4660 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4661 dma_addr = dma_unmap_addr(ri, mapping);
4663 post_ptr = &jmb_prod_idx;
4665 goto next_pkt_nopost;
4667 work_mask |= opaque_key;
4669 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4670 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4672 tg3_recycle_rx(tnapi, tpr, opaque_key,
4673 desc_idx, *post_ptr);
4675 /* Other statistics are kept track of by the card. */
4676 tp->net_stats.rx_dropped++;
4680 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4683 if (len > TG3_RX_COPY_THRESH(tp)) {
4686 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4691 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4692 PCI_DMA_FROMDEVICE);
4694 /* Ensure that the update to the skb happens
4695 * after the usage of the old DMA mapping.
4703 struct sk_buff *copy_skb;
4705 tg3_recycle_rx(tnapi, tpr, opaque_key,
4706 desc_idx, *post_ptr);
4708 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4710 if (copy_skb == NULL)
4711 goto drop_it_no_recycle;
4713 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4714 skb_put(copy_skb, len);
4715 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4716 skb_copy_from_linear_data(skb, copy_skb->data, len);
4717 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4719 /* We'll reuse the original ring buffer. */
4723 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4724 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4725 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4726 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4727 skb->ip_summed = CHECKSUM_UNNECESSARY;
4729 skb->ip_summed = CHECKSUM_NONE;
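/* In other words, the checksum value the NIC reports in the descriptor must
 * read back as exactly 0xffff for the packet to be marked
 * CHECKSUM_UNNECESSARY; anything else leaves CHECKSUM_NONE and the stack
 * verifies the checksum in software.
 */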
4731 skb->protocol = eth_type_trans(skb, tp->dev);
4733 if (len > (tp->dev->mtu + ETH_HLEN) &&
4734 skb->protocol != htons(ETH_P_8021Q)) {
4739 if (desc->type_flags & RXD_FLAG_VLAN &&
4740 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4741 vtag = desc->err_vlan & RXD_VLAN_MASK;
4742 #if TG3_VLAN_TAG_USED
4748 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4749 __skb_push(skb, VLAN_HLEN);
4751 memmove(ve, skb->data + VLAN_HLEN,
4753 ve->h_vlan_proto = htons(ETH_P_8021Q);
4754 ve->h_vlan_TCI = htons(vtag);
4758 #if TG3_VLAN_TAG_USED
4760 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4763 napi_gro_receive(&tnapi->napi, skb);
4771 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4772 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4773 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4774 tpr->rx_std_prod_idx);
4775 work_mask &= ~RXD_OPAQUE_RING_STD;
4780 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4782 /* Refresh hw_idx to see if there is new work */
4783 if (sw_idx == hw_idx) {
4784 hw_idx = *(tnapi->rx_rcb_prod_idx);
4789 /* ACK the status ring. */
4790 tnapi->rx_rcb_ptr = sw_idx;
4791 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4793 /* Refill RX ring(s). */
4794 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
4795 if (work_mask & RXD_OPAQUE_RING_STD) {
4796 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4797 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4798 tpr->rx_std_prod_idx);
4800 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4801 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4802 TG3_RX_JUMBO_RING_SIZE;
4803 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4804 tpr->rx_jmb_prod_idx);
4807 } else if (work_mask) {
4808 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4809 * updated before the producer indices can be updated.
4813 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4814 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4816 if (tnapi != &tp->napi[1])
4817 napi_schedule(&tp->napi[1].napi);
4823 static void tg3_poll_link(struct tg3 *tp)
4825 /* handle link change and other phy events */
4826 if (!(tp->tg3_flags &
4827 (TG3_FLAG_USE_LINKCHG_REG |
4828 TG3_FLAG_POLL_SERDES))) {
4829 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4831 if (sblk->status & SD_STATUS_LINK_CHG) {
4832 sblk->status = SD_STATUS_UPDATED |
4833 (sblk->status & ~SD_STATUS_LINK_CHG);
4834 spin_lock(&tp->lock);
4835 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4837 (MAC_STATUS_SYNC_CHANGED |
4838 MAC_STATUS_CFG_CHANGED |
4839 MAC_STATUS_MI_COMPLETION |
4840 MAC_STATUS_LNKSTATE_CHANGED));
4843 tg3_setup_phy(tp, 0);
4844 spin_unlock(&tp->lock);
4849 static int tg3_rx_prodring_xfer(struct tg3 *tp,
4850 struct tg3_rx_prodring_set *dpr,
4851 struct tg3_rx_prodring_set *spr)
4853 u32 si, di, cpycnt, src_prod_idx;
4857 src_prod_idx = spr->rx_std_prod_idx;
4859 /* Make sure updates to the rx_std_buffers[] entries and the
4860 * standard producer index are seen in the correct order.
4864 if (spr->rx_std_cons_idx == src_prod_idx)
4867 if (spr->rx_std_cons_idx < src_prod_idx)
4868 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4870 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4872 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
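/* Worked example (illustrative): with the source consumer at 500, a wrapped
 * source producer at 10 and the destination producer at 300, cpycnt becomes
 * min(512 - 500, 512 - 300) == 12; the remaining 10 entries are moved on a
 * later pass once the consumer index wraps to 0.
 */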
4874 si = spr->rx_std_cons_idx;
4875 di = dpr->rx_std_prod_idx;
4877 for (i = di; i < di + cpycnt; i++) {
4878 if (dpr->rx_std_buffers[i].skb) {
4888 /* Ensure that updates to the rx_std_buffers ring and the
4889 * shadowed hardware producer ring from tg3_recycle_rx() are
4890 * ordered correctly WRT the skb check above.
4894 memcpy(&dpr->rx_std_buffers[di],
4895 &spr->rx_std_buffers[si],
4896 cpycnt * sizeof(struct ring_info));
4898 for (i = 0; i < cpycnt; i++, di++, si++) {
4899 struct tg3_rx_buffer_desc *sbd, *dbd;
4900 sbd = &spr->rx_std[si];
4901 dbd = &dpr->rx_std[di];
4902 dbd->addr_hi = sbd->addr_hi;
4903 dbd->addr_lo = sbd->addr_lo;
4906 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4908 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4913 src_prod_idx = spr->rx_jmb_prod_idx;
4915 /* Make sure updates to the rx_jmb_buffers[] entries and
4916 * the jumbo producer index are seen in the correct order.
4920 if (spr->rx_jmb_cons_idx == src_prod_idx)
4923 if (spr->rx_jmb_cons_idx < src_prod_idx)
4924 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4926 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4928 cpycnt = min(cpycnt,
4929 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4931 si = spr->rx_jmb_cons_idx;
4932 di = dpr->rx_jmb_prod_idx;
4934 for (i = di; i < di + cpycnt; i++) {
4935 if (dpr->rx_jmb_buffers[i].skb) {
4945 /* Ensure that updates to the rx_jmb_buffers ring and the
4946 * shadowed hardware producer ring from tg3_recycle_rx() are
4947 * ordered correctly WRT the skb check above.
4951 memcpy(&dpr->rx_jmb_buffers[di],
4952 &spr->rx_jmb_buffers[si],
4953 cpycnt * sizeof(struct ring_info));
4955 for (i = 0; i < cpycnt; i++, di++, si++) {
4956 struct tg3_rx_buffer_desc *sbd, *dbd;
4957 sbd = &spr->rx_jmb[si].std;
4958 dbd = &dpr->rx_jmb[di].std;
4959 dbd->addr_hi = sbd->addr_hi;
4960 dbd->addr_lo = sbd->addr_lo;
4963 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4964 TG3_RX_JUMBO_RING_SIZE;
4965 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4966 TG3_RX_JUMBO_RING_SIZE;
4972 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4974 struct tg3 *tp = tnapi->tp;
4976 /* run TX completion thread */
4977 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
4979 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4983 /* run RX thread, within the bounds set by NAPI.
4984 * All RX "locking" is done by ensuring outside
4985 * code synchronizes with tg3->napi.poll()
4987 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4988 work_done += tg3_rx(tnapi, budget - work_done);
4990 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4991 struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
4993 u32 std_prod_idx = dpr->rx_std_prod_idx;
4994 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
4996 for (i = 1; i < tp->irq_cnt; i++)
4997 err |= tg3_rx_prodring_xfer(tp, dpr,
4998 tp->napi[i].prodring);
5002 if (std_prod_idx != dpr->rx_std_prod_idx)
5003 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5004 dpr->rx_std_prod_idx);
5006 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
5007 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5008 dpr->rx_jmb_prod_idx);
5013 tw32_f(HOSTCC_MODE, tp->coal_now);
5019 static int tg3_poll_msix(struct napi_struct *napi, int budget)
5021 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5022 struct tg3 *tp = tnapi->tp;
5024 struct tg3_hw_status *sblk = tnapi->hw_status;
5027 work_done = tg3_poll_work(tnapi, work_done, budget);
5029 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5032 if (unlikely(work_done >= budget))
5035 /* tp->last_tag is used in tg3_int_reenable() below
5036 * to tell the hw how much work has been processed,
5037 * so we must read it before checking for more work.
5039 tnapi->last_tag = sblk->status_tag;
5040 tnapi->last_irq_tag = tnapi->last_tag;
5043 /* check for RX/TX work to do */
5044 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
5045 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
5046 napi_complete(napi);
5047 /* Reenable interrupts. */
5048 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
5057 /* work_done is guaranteed to be less than budget. */
5058 napi_complete(napi);
5059 schedule_work(&tp->reset_task);
5063 static int tg3_poll(struct napi_struct *napi, int budget)
5065 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
5066 struct tg3 *tp = tnapi->tp;
5068 struct tg3_hw_status *sblk = tnapi->hw_status;
5073 work_done = tg3_poll_work(tnapi, work_done, budget);
5075 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
5078 if (unlikely(work_done >= budget))
5081 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
5082 /* tp->last_tag is used in tg3_int_reenable() below
5083 * to tell the hw how much work has been processed,
5084 * so we must read it before checking for more work.
5086 tnapi->last_tag = sblk->status_tag;
5087 tnapi->last_irq_tag = tnapi->last_tag;
5090 sblk->status &= ~SD_STATUS_UPDATED;
5092 if (likely(!tg3_has_work(tnapi))) {
5093 napi_complete(napi);
5094 tg3_int_reenable(tnapi);
5102 /* work_done is guaranteed to be less than budget. */
5103 napi_complete(napi);
5104 schedule_work(&tp->reset_task);
5108 static void tg3_irq_quiesce(struct tg3 *tp)
5112 BUG_ON(tp->irq_sync);
5117 for (i = 0; i < tp->irq_cnt; i++)
5118 synchronize_irq(tp->napi[i].irq_vec);
5121 static inline int tg3_irq_sync(struct tg3 *tp)
5123 return tp->irq_sync;
5126 /* Fully shutdown all tg3 driver activity elsewhere in the system.
5127 * If irq_sync is non-zero, the IRQ handlers must be quiesced as well.
5128 * Most of the time this is only necessary when shutting down the
5129 * device.
5131 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
5133 spin_lock_bh(&tp->lock);
5135 tg3_irq_quiesce(tp);
5138 static inline void tg3_full_unlock(struct tg3 *tp)
5140 spin_unlock_bh(&tp->lock);
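/* A typical caller (e.g. tg3_change_mtu() below) looks roughly like:
 *
 *	tg3_full_lock(tp, 1);		quiesce IRQs and take tp->lock
 *	... reconfigure the hardware ...
 *	tg3_full_unlock(tp);
 */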
5143 /* One-shot MSI handler - the chip automatically disables the interrupt
5144 * after sending the MSI, so the driver doesn't have to do it.
5146 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
5148 struct tg3_napi *tnapi = dev_id;
5149 struct tg3 *tp = tnapi->tp;
5151 prefetch(tnapi->hw_status);
5153 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5155 if (likely(!tg3_irq_sync(tp)))
5156 napi_schedule(&tnapi->napi);
5161 /* MSI ISR - No need to check for interrupt sharing and no need to
5162 * flush status block and interrupt mailbox. PCI ordering rules
5163 * guarantee that MSI will arrive after the status block.
5165 static irqreturn_t tg3_msi(int irq, void *dev_id)
5167 struct tg3_napi *tnapi = dev_id;
5168 struct tg3 *tp = tnapi->tp;
5170 prefetch(tnapi->hw_status);
5172 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5174 * Writing any value to intr-mbox-0 clears PCI INTA# and
5175 * chip-internal interrupt pending events.
5176 * Writing non-zero to intr-mbox-0 additionally tells the
5177 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
5180 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5181 if (likely(!tg3_irq_sync(tp)))
5182 napi_schedule(&tnapi->napi);
5184 return IRQ_RETVAL(1);
5187 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
5189 struct tg3_napi *tnapi = dev_id;
5190 struct tg3 *tp = tnapi->tp;
5191 struct tg3_hw_status *sblk = tnapi->hw_status;
5192 unsigned int handled = 1;
5194 /* In INTx mode, it is possible for the interrupt to arrive at
5195 * the CPU before the status block that was posted prior to the interrupt.
5196 * Reading the PCI State register will confirm whether the
5197 * interrupt is ours and will flush the status block.
5199 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
5200 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5201 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5208 * Writing any value to intr-mbox-0 clears PCI INTA# and
5209 * chip-internal interrupt pending events.
5210 * Writing non-zero to intr-mbox-0 additionally tells the
5211 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
5214 * Flush the mailbox to de-assert the IRQ immediately to prevent
5215 * spurious interrupts. The flush impacts performance but
5216 * excessive spurious interrupts can be worse in some cases.
5218 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5219 if (tg3_irq_sync(tp))
5221 sblk->status &= ~SD_STATUS_UPDATED;
5222 if (likely(tg3_has_work(tnapi))) {
5223 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5224 napi_schedule(&tnapi->napi);
5226 /* No work, shared interrupt perhaps? Re-enable
5227 * interrupts, and flush that PCI write
5229 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
5233 return IRQ_RETVAL(handled);
5236 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
5238 struct tg3_napi *tnapi = dev_id;
5239 struct tg3 *tp = tnapi->tp;
5240 struct tg3_hw_status *sblk = tnapi->hw_status;
5241 unsigned int handled = 1;
5243 /* In INTx mode, it is possible for the interrupt to arrive at
5244 * the CPU before the status block that was posted prior to the interrupt.
5245 * Reading the PCI State register will confirm whether the
5246 * interrupt is ours and will flush the status block.
5248 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
5249 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
5250 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5257 * writing any value to intr-mbox-0 clears PCI INTA# and
5258 * chip-internal interrupt pending events.
5259 * writing non-zero to intr-mbox-0 additionally tells the
5260 * NIC to stop sending us irqs, engaging "in-intr-handler" event coalescing.
5263 * Flush the mailbox to de-assert the IRQ immediately to prevent
5264 * spurious interrupts. The flush impacts performance but
5265 * excessive spurious interrupts can be worse in some cases.
5267 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5270 * In a shared interrupt configuration, sometimes other devices'
5271 * interrupts will scream. We record the current status tag here
5272 * so that the above check can report that the screaming interrupts
5273 * are unhandled. Eventually they will be silenced.
5275 tnapi->last_irq_tag = sblk->status_tag;
5277 if (tg3_irq_sync(tp))
5280 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5282 napi_schedule(&tnapi->napi);
5285 return IRQ_RETVAL(handled);
5288 /* ISR for interrupt test */
5289 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
5291 struct tg3_napi *tnapi = dev_id;
5292 struct tg3 *tp = tnapi->tp;
5293 struct tg3_hw_status *sblk = tnapi->hw_status;
5295 if ((sblk->status & SD_STATUS_UPDATED) ||
5296 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
5297 tg3_disable_ints(tp);
5298 return IRQ_RETVAL(1);
5300 return IRQ_RETVAL(0);
5303 static int tg3_init_hw(struct tg3 *, int);
5304 static int tg3_halt(struct tg3 *, int, int);
5306 /* Restart hardware after configuration changes, self-test, etc.
5307 * Invoked with tp->lock held.
5309 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5310 __releases(tp->lock)
5311 __acquires(tp->lock)
5315 err = tg3_init_hw(tp, reset_phy);
5318 "Failed to re-initialize device, aborting\n");
5319 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5320 tg3_full_unlock(tp);
5321 del_timer_sync(&tp->timer);
5323 tg3_napi_enable(tp);
5325 tg3_full_lock(tp, 0);
5330 #ifdef CONFIG_NET_POLL_CONTROLLER
5331 static void tg3_poll_controller(struct net_device *dev)
5334 struct tg3 *tp = netdev_priv(dev);
5336 for (i = 0; i < tp->irq_cnt; i++)
5337 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
5341 static void tg3_reset_task(struct work_struct *work)
5343 struct tg3 *tp = container_of(work, struct tg3, reset_task);
5345 unsigned int restart_timer;
5347 tg3_full_lock(tp, 0);
5349 if (!netif_running(tp->dev)) {
5350 tg3_full_unlock(tp);
5354 tg3_full_unlock(tp);
5360 tg3_full_lock(tp, 1);
5362 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
5363 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
5365 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
5366 tp->write32_tx_mbox = tg3_write32_tx_mbox;
5367 tp->write32_rx_mbox = tg3_write_flush_reg32;
5368 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
5369 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
5372 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
5373 err = tg3_init_hw(tp, 1);
5377 tg3_netif_start(tp);
5380 mod_timer(&tp->timer, jiffies + 1);
5383 tg3_full_unlock(tp);
5389 static void tg3_dump_short_state(struct tg3 *tp)
5391 netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
5392 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
5393 netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
5394 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
5397 static void tg3_tx_timeout(struct net_device *dev)
5399 struct tg3 *tp = netdev_priv(dev);
5401 if (netif_msg_tx_err(tp)) {
5402 netdev_err(dev, "transmit timed out, resetting\n");
5403 tg3_dump_short_state(tp);
5406 schedule_work(&tp->reset_task);
5409 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5410 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5412 u32 base = (u32) mapping & 0xffffffff;
5414 return ((base > 0xffffdcc0) &&
5415 (base + len + 8 < base));
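/* Worked example (illustrative): a buffer mapped at 0xfffff000 with
 * len == 0x2000 makes base + len + 8 wrap past the 4GB mark, so the test
 * fires; the same buffer mapped at 0x10000000 does not and passes.
 */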
5418 /* Test for DMA addresses > 40-bit */
5419 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5422 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
5423 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
5424 return (((u64) mapping + len) > DMA_BIT_MASK(40));
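/* Illustrative: with the 40-bit DMA bug present, a mapping at 0xffffff0000
 * with len == 0x20000 ends beyond DMA_BIT_MASK(40) and therefore has to be
 * handled by tigon3_dma_hwbug_workaround() below.
 */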
5431 static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5433 /* Workaround 4GB and 40-bit hardware DMA bugs. */
5434 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5435 struct sk_buff *skb, u32 last_plus_one,
5436 u32 *start, u32 base_flags, u32 mss)
5438 struct tg3 *tp = tnapi->tp;
5439 struct sk_buff *new_skb;
5440 dma_addr_t new_addr = 0;
5444 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
5445 new_skb = skb_copy(skb, GFP_ATOMIC);
5447 int more_headroom = 4 - ((unsigned long)skb->data & 3);
5449 new_skb = skb_copy_expand(skb,
5450 skb_headroom(skb) + more_headroom,
5451 skb_tailroom(skb), GFP_ATOMIC);
5457 /* New SKB is guaranteed to be linear. */
5459 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
5461 /* Make sure the mapping succeeded */
5462 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
5464 dev_kfree_skb(new_skb);
5467 /* Make sure new skb does not cross any 4G boundaries.
5468 * Drop the packet if it does.
5470 } else if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5471 tg3_4g_overflow_test(new_addr, new_skb->len)) {
5472 pci_unmap_single(tp->pdev, new_addr, new_skb->len,
5475 dev_kfree_skb(new_skb);
5478 tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
5479 base_flags, 1 | (mss << 1));
5480 *start = NEXT_TX(entry);
5484 /* Now clean up the sw ring entries. */
5486 while (entry != last_plus_one) {
5490 len = skb_headlen(skb);
5492 len = skb_shinfo(skb)->frags[i-1].size;
5494 pci_unmap_single(tp->pdev,
5495 dma_unmap_addr(&tnapi->tx_buffers[entry],
5497 len, PCI_DMA_TODEVICE);
5499 tnapi->tx_buffers[entry].skb = new_skb;
5500 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5503 tnapi->tx_buffers[entry].skb = NULL;
5505 entry = NEXT_TX(entry);
5514 static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5515 dma_addr_t mapping, int len, u32 flags,
5518 struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
5519 int is_end = (mss_and_is_end & 0x1);
5520 u32 mss = (mss_and_is_end >> 1);
5524 flags |= TXD_FLAG_END;
5525 if (flags & TXD_FLAG_VLAN) {
5526 vlan_tag = flags >> 16;
5529 vlan_tag |= (mss << TXD_MSS_SHIFT);
5531 txd->addr_hi = ((u64) mapping >> 32);
5532 txd->addr_lo = ((u64) mapping & 0xffffffff);
5533 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
5534 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
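/* Callers pack mss_and_is_end as (is_last_fragment) | (mss << 1); e.g. the
 * final fragment of a TSO skb with an MSS of 1448 passes 1 | (1448 << 1),
 * which decodes above to is_end == 1 and mss == 1448.
 */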
5537 /* hard_start_xmit for devices that don't have any bugs and
5538 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5540 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5541 struct net_device *dev)
5543 struct tg3 *tp = netdev_priv(dev);
5544 u32 len, entry, base_flags, mss;
5546 struct tg3_napi *tnapi;
5547 struct netdev_queue *txq;
5548 unsigned int i, last;
5550 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5551 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5552 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5555 /* We are running in BH disabled context with netif_tx_lock
5556 * and TX reclaim runs via tp->napi.poll inside of a software
5557 * interrupt. Furthermore, IRQ processing runs lockless so we have
5558 * no IRQ context deadlocks to worry about either. Rejoice!
5560 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5561 if (!netif_tx_queue_stopped(txq)) {
5562 netif_tx_stop_queue(txq);
5564 /* This is a hard error, log it. */
5566 "BUG! Tx Ring full when queue awake!\n");
5568 return NETDEV_TX_BUSY;
5571 entry = tnapi->tx_prod;
5574 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5575 int tcp_opt_len, ip_tcp_len;
5578 if (skb_header_cloned(skb) &&
5579 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5584 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
5585 hdrlen = skb_headlen(skb) - ETH_HLEN;
5587 struct iphdr *iph = ip_hdr(skb);
5589 tcp_opt_len = tcp_optlen(skb);
5590 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5593 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5594 hdrlen = ip_tcp_len + tcp_opt_len;
5597 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5598 mss |= (hdrlen & 0xc) << 12;
5600 base_flags |= 0x00000010;
5601 base_flags |= (hdrlen & 0x3e0) << 5;
5605 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5606 TXD_FLAG_CPU_POST_DMA);
5608 tcp_hdr(skb)->check = 0;
5610 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5611 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5614 #if TG3_VLAN_TAG_USED
5615 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5616 base_flags |= (TXD_FLAG_VLAN |
5617 (vlan_tx_tag_get(skb) << 16));
5620 len = skb_headlen(skb);
5622 /* Queue skb data, a.k.a. the main skb fragment. */
5623 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5624 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5629 tnapi->tx_buffers[entry].skb = skb;
5630 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5632 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5633 !mss && skb->len > ETH_DATA_LEN)
5634 base_flags |= TXD_FLAG_JMB_PKT;
5636 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5637 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5639 entry = NEXT_TX(entry);
5641 /* Now loop through additional data fragments, and queue them. */
5642 if (skb_shinfo(skb)->nr_frags > 0) {
5643 last = skb_shinfo(skb)->nr_frags - 1;
5644 for (i = 0; i <= last; i++) {
5645 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5648 mapping = pci_map_page(tp->pdev,
5651 len, PCI_DMA_TODEVICE);
5652 if (pci_dma_mapping_error(tp->pdev, mapping))
5655 tnapi->tx_buffers[entry].skb = NULL;
5656 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5659 tg3_set_txd(tnapi, entry, mapping, len,
5660 base_flags, (i == last) | (mss << 1));
5662 entry = NEXT_TX(entry);
5666 /* Packets are ready, update Tx producer idx locally and on card. */
5667 tw32_tx_mbox(tnapi->prodmbox, entry);
5669 tnapi->tx_prod = entry;
5670 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5671 netif_tx_stop_queue(txq);
5672 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5673 netif_tx_wake_queue(txq);
5679 return NETDEV_TX_OK;
5683 entry = tnapi->tx_prod;
5684 tnapi->tx_buffers[entry].skb = NULL;
5685 pci_unmap_single(tp->pdev,
5686 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5689 for (i = 0; i <= last; i++) {
5690 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5691 entry = NEXT_TX(entry);
5693 pci_unmap_page(tp->pdev,
5694 dma_unmap_addr(&tnapi->tx_buffers[entry],
5696 frag->size, PCI_DMA_TODEVICE);
5700 return NETDEV_TX_OK;
5703 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *,
5704 struct net_device *);
5706 /* Use GSO to work around a rare TSO bug that may be triggered when the
5707 * TSO header is greater than 80 bytes.
5709 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
5711 struct sk_buff *segs, *nskb;
5712 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
5714 /* Estimate the number of fragments in the worst case */
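/* (Three descriptors per resulting segment is presumably a conservative
 * upper bound: one for the copied headers plus a couple of payload
 * fragments per segment.)
 */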
5715 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
5716 netif_stop_queue(tp->dev);
5717 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
5718 return NETDEV_TX_BUSY;
5720 netif_wake_queue(tp->dev);
5723 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
5725 goto tg3_tso_bug_end;
5731 tg3_start_xmit_dma_bug(nskb, tp->dev);
5737 return NETDEV_TX_OK;
5740 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5741 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
5743 static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5744 struct net_device *dev)
5746 struct tg3 *tp = netdev_priv(dev);
5747 u32 len, entry, base_flags, mss;
5748 int would_hit_hwbug;
5750 struct tg3_napi *tnapi;
5751 struct netdev_queue *txq;
5752 unsigned int i, last;
5754 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5755 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5756 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
5759 /* We are running in BH disabled context with netif_tx_lock
5760 * and TX reclaim runs via tp->napi.poll inside of a software
5761 * interrupt. Furthermore, IRQ processing runs lockless so we have
5762 * no IRQ context deadlocks to worry about either. Rejoice!
5764 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5765 if (!netif_tx_queue_stopped(txq)) {
5766 netif_tx_stop_queue(txq);
5768 /* This is a hard error, log it. */
5770 "BUG! Tx Ring full when queue awake!\n");
5772 return NETDEV_TX_BUSY;
5775 entry = tnapi->tx_prod;
5777 if (skb->ip_summed == CHECKSUM_PARTIAL)
5778 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5780 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5782 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5784 if (skb_header_cloned(skb) &&
5785 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5790 tcp_opt_len = tcp_optlen(skb);
5791 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5793 hdr_len = ip_tcp_len + tcp_opt_len;
5794 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5795 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5796 return tg3_tso_bug(tp, skb);
5798 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5799 TXD_FLAG_CPU_POST_DMA);
5803 iph->tot_len = htons(mss + hdr_len);
5804 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5805 tcp_hdr(skb)->check = 0;
5806 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5808 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5813 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5814 mss |= (hdr_len & 0xc) << 12;
5816 base_flags |= 0x00000010;
5817 base_flags |= (hdr_len & 0x3e0) << 5;
5818 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5819 mss |= hdr_len << 9;
5820 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5821 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5822 if (tcp_opt_len || iph->ihl > 5) {
5825 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5826 mss |= (tsflags << 11);
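/* Worked example (illustrative): an IP header carrying one option word
 * (ihl == 6) plus 12 bytes of TCP options yields tsflags == 1 + 3 == 4.
 */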
5829 if (tcp_opt_len || iph->ihl > 5) {
5832 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5833 base_flags |= tsflags << 12;
5837 #if TG3_VLAN_TAG_USED
5838 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5839 base_flags |= (TXD_FLAG_VLAN |
5840 (vlan_tx_tag_get(skb) << 16));
5843 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5844 !mss && skb->len > ETH_DATA_LEN)
5845 base_flags |= TXD_FLAG_JMB_PKT;
5847 len = skb_headlen(skb);
5849 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
5850 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5855 tnapi->tx_buffers[entry].skb = skb;
5856 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5858 would_hit_hwbug = 0;
5860 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5861 would_hit_hwbug = 1;
5863 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5864 tg3_4g_overflow_test(mapping, len))
5865 would_hit_hwbug = 1;
5867 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5868 tg3_40bit_overflow_test(tp, mapping, len))
5869 would_hit_hwbug = 1;
5871 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5872 would_hit_hwbug = 1;
5874 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
5875 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5877 entry = NEXT_TX(entry);
5879 /* Now loop through additional data fragments, and queue them. */
5880 if (skb_shinfo(skb)->nr_frags > 0) {
5881 last = skb_shinfo(skb)->nr_frags - 1;
5882 for (i = 0; i <= last; i++) {
5883 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5886 mapping = pci_map_page(tp->pdev,
5889 len, PCI_DMA_TODEVICE);
5891 tnapi->tx_buffers[entry].skb = NULL;
5892 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5894 if (pci_dma_mapping_error(tp->pdev, mapping))
5897 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5899 would_hit_hwbug = 1;
5901 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5902 tg3_4g_overflow_test(mapping, len))
5903 would_hit_hwbug = 1;
5905 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5906 tg3_40bit_overflow_test(tp, mapping, len))
5907 would_hit_hwbug = 1;
5909 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5910 tg3_set_txd(tnapi, entry, mapping, len,
5911 base_flags, (i == last)|(mss << 1));
5913 tg3_set_txd(tnapi, entry, mapping, len,
5914 base_flags, (i == last));
5916 entry = NEXT_TX(entry);
5920 if (would_hit_hwbug) {
5921 u32 last_plus_one = entry;
5924 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5925 start &= (TG3_TX_RING_SIZE - 1);
5927 /* If the workaround fails due to memory/mapping
5928 * failure, silently drop this packet.
5930 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5931 &start, base_flags, mss))
5937 /* Packets are ready, update Tx producer idx locally and on card. */
5938 tw32_tx_mbox(tnapi->prodmbox, entry);
5940 tnapi->tx_prod = entry;
5941 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5942 netif_tx_stop_queue(txq);
5943 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5944 netif_tx_wake_queue(txq);
5950 return NETDEV_TX_OK;
5954 entry = tnapi->tx_prod;
5955 tnapi->tx_buffers[entry].skb = NULL;
5956 pci_unmap_single(tp->pdev,
5957 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5960 for (i = 0; i <= last; i++) {
5961 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5962 entry = NEXT_TX(entry);
5964 pci_unmap_page(tp->pdev,
5965 dma_unmap_addr(&tnapi->tx_buffers[entry],
5967 frag->size, PCI_DMA_TODEVICE);
5971 return NETDEV_TX_OK;
5974 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5979 if (new_mtu > ETH_DATA_LEN) {
5980 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5981 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5982 ethtool_op_set_tso(dev, 0);
5984 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5987 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5988 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5989 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
5993 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5995 struct tg3 *tp = netdev_priv(dev);
5998 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
6001 if (!netif_running(dev)) {
6002 /* We'll just catch it later when the
6005 tg3_set_mtu(dev, tp, new_mtu);
6013 tg3_full_lock(tp, 1);
6015 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
6017 tg3_set_mtu(dev, tp, new_mtu);
6019 err = tg3_restart_hw(tp, 0);
6022 tg3_netif_start(tp);
6024 tg3_full_unlock(tp);
6032 static void tg3_rx_prodring_free(struct tg3 *tp,
6033 struct tg3_rx_prodring_set *tpr)
6037 if (tpr != &tp->prodring[0]) {
6038 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
6039 i = (i + 1) % TG3_RX_RING_SIZE)
6040 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6043 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6044 for (i = tpr->rx_jmb_cons_idx;
6045 i != tpr->rx_jmb_prod_idx;
6046 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
6047 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6055 for (i = 0; i < TG3_RX_RING_SIZE; i++)
6056 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
6059 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6060 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
6061 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
6066 /* Initialize rx rings for packet processing.
6068 * The chip has been shut down and the driver detached from
6069 * the networking, so no interrupts or new tx packets will
6070 * end up in the driver. tp->{tx,}lock are held and thus
6073 static int tg3_rx_prodring_alloc(struct tg3 *tp,
6074 struct tg3_rx_prodring_set *tpr)
6076 u32 i, rx_pkt_dma_sz;
6078 tpr->rx_std_cons_idx = 0;
6079 tpr->rx_std_prod_idx = 0;
6080 tpr->rx_jmb_cons_idx = 0;
6081 tpr->rx_jmb_prod_idx = 0;
6083 if (tpr != &tp->prodring[0]) {
6084 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
6085 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
6086 memset(&tpr->rx_jmb_buffers[0], 0,
6087 TG3_RX_JMB_BUFF_RING_SIZE);
6091 /* Zero out all descriptors. */
6092 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
6094 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
6095 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
6096 tp->dev->mtu > ETH_DATA_LEN)
6097 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
6098 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
6100 /* Initialize invariants of the rings; we only set this
6101 * stuff once. This works because the card does not
6102 * write into the rx buffer posting rings.
6104 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
6105 struct tg3_rx_buffer_desc *rxd;
6107 rxd = &tpr->rx_std[i];
6108 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
6109 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
6110 rxd->opaque = (RXD_OPAQUE_RING_STD |
6111 (i << RXD_OPAQUE_INDEX_SHIFT));
6114 /* Now allocate fresh SKBs for each rx ring. */
6115 for (i = 0; i < tp->rx_pending; i++) {
6116 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6117 netdev_warn(tp->dev,
6118 "Using a smaller RX standard ring. Only "
6119 "%d out of %d buffers were allocated "
6120 "successfully\n", i, tp->rx_pending);
6128 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE))
6131 memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
6133 if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
6136 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
6137 struct tg3_rx_buffer_desc *rxd;
6139 rxd = &tpr->rx_jmb[i].std;
6140 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
6141 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
6143 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
6144 (i << RXD_OPAQUE_INDEX_SHIFT));
6147 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6148 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6149 netdev_warn(tp->dev,
6150 "Using a smaller RX jumbo ring. Only %d "
6151 "out of %d buffers were allocated "
6152 "successfully\n", i, tp->rx_jumbo_pending);
6155 tp->rx_jumbo_pending = i;
6164 tg3_rx_prodring_free(tp, tpr);
6168 static void tg3_rx_prodring_fini(struct tg3 *tp,
6169 struct tg3_rx_prodring_set *tpr)
6171 kfree(tpr->rx_std_buffers);
6172 tpr->rx_std_buffers = NULL;
6173 kfree(tpr->rx_jmb_buffers);
6174 tpr->rx_jmb_buffers = NULL;
6176 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
6177 tpr->rx_std, tpr->rx_std_mapping);
6181 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
6182 tpr->rx_jmb, tpr->rx_jmb_mapping);
6187 static int tg3_rx_prodring_init(struct tg3 *tp,
6188 struct tg3_rx_prodring_set *tpr)
6190 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
6191 if (!tpr->rx_std_buffers)
6194 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
6195 &tpr->rx_std_mapping);
6199 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
6200 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
6202 if (!tpr->rx_jmb_buffers)
6205 tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
6206 TG3_RX_JUMBO_RING_BYTES,
6207 &tpr->rx_jmb_mapping);
6215 tg3_rx_prodring_fini(tp, tpr);
6219 /* Free up pending packets in all rx/tx rings.
6221 * The chip has been shut down and the driver detached from
6222 * the networking, so no interrupts or new tx packets will
6223 * end up in the driver. tp->{tx,}lock is not held and we are not
6224 * in an interrupt context and thus may sleep.
6226 static void tg3_free_rings(struct tg3 *tp)
6230 for (j = 0; j < tp->irq_cnt; j++) {
6231 struct tg3_napi *tnapi = &tp->napi[j];
6233 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6235 if (!tnapi->tx_buffers)
6238 for (i = 0; i < TG3_TX_RING_SIZE; ) {
6239 struct ring_info *txp;
6240 struct sk_buff *skb;
6243 txp = &tnapi->tx_buffers[i];
6251 pci_unmap_single(tp->pdev,
6252 dma_unmap_addr(txp, mapping),
6259 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6260 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6261 pci_unmap_page(tp->pdev,
6262 dma_unmap_addr(txp, mapping),
6263 skb_shinfo(skb)->frags[k].size,
6268 dev_kfree_skb_any(skb);
6273 /* Initialize tx/rx rings for packet processing.
6275 * The chip has been shut down and the driver detached from
6276 * the networking, so no interrupts or new tx packets will
6277 * end up in the driver. tp->{tx,}lock are held and thus
6280 static int tg3_init_rings(struct tg3 *tp)
6284 /* Free up all the SKBs. */
6287 for (i = 0; i < tp->irq_cnt; i++) {
6288 struct tg3_napi *tnapi = &tp->napi[i];
6290 tnapi->last_tag = 0;
6291 tnapi->last_irq_tag = 0;
6292 tnapi->hw_status->status = 0;
6293 tnapi->hw_status->status_tag = 0;
6294 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6299 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6301 tnapi->rx_rcb_ptr = 0;
6303 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6305 if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
6315 * Must not be invoked with interrupt sources disabled and
6316 * the hardware shut down.
6318 static void tg3_free_consistent(struct tg3 *tp)
6322 for (i = 0; i < tp->irq_cnt; i++) {
6323 struct tg3_napi *tnapi = &tp->napi[i];
6325 if (tnapi->tx_ring) {
6326 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
6327 tnapi->tx_ring, tnapi->tx_desc_mapping);
6328 tnapi->tx_ring = NULL;
6331 kfree(tnapi->tx_buffers);
6332 tnapi->tx_buffers = NULL;
6334 if (tnapi->rx_rcb) {
6335 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
6337 tnapi->rx_rcb_mapping);
6338 tnapi->rx_rcb = NULL;
6341 if (tnapi->hw_status) {
6342 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
6344 tnapi->status_mapping);
6345 tnapi->hw_status = NULL;
6350 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
6351 tp->hw_stats, tp->stats_mapping);
6352 tp->hw_stats = NULL;
6355 for (i = 0; i < tp->irq_cnt; i++)
6356 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
6360 * Must not be invoked with interrupt sources disabled and
6361 * the hardware shut down. Can sleep.
6363 static int tg3_alloc_consistent(struct tg3 *tp)
6367 for (i = 0; i < tp->irq_cnt; i++) {
6368 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6372 tp->hw_stats = pci_alloc_consistent(tp->pdev,
6373 sizeof(struct tg3_hw_stats),
6374 &tp->stats_mapping);
6378 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6380 for (i = 0; i < tp->irq_cnt; i++) {
6381 struct tg3_napi *tnapi = &tp->napi[i];
6382 struct tg3_hw_status *sblk;
6384 tnapi->hw_status = pci_alloc_consistent(tp->pdev,
6386 &tnapi->status_mapping);
6387 if (!tnapi->hw_status)
6390 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6391 sblk = tnapi->hw_status;
6393 /* If multivector TSS is enabled, vector 0 does not handle
6394 * tx interrupts. Don't allocate any resources for it.
6396 if ((!i && !(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) ||
6397 (i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))) {
6398 tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
6401 if (!tnapi->tx_buffers)
6404 tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
6406 &tnapi->tx_desc_mapping);
6407 if (!tnapi->tx_ring)
6412 * When RSS is enabled, the status block format changes
6413 * slightly. The "rx_jumbo_consumer", "reserved",
6414 * and "rx_mini_consumer" members get mapped to the
6415 * other three rx return ring producer indexes.
6419 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
6422 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
6425 tnapi->rx_rcb_prod_idx = &sblk->reserved;
6428 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
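/* So, for example, one rx return ring vector reads its producer index from
 * idx[0].rx_producer while the others read theirs from the repurposed
 * rx_jumbo_consumer, reserved and rx_mini_consumer words, per the
 * assignments above.
 */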
6432 tnapi->prodring = &tp->prodring[i];
6435 * If multivector RSS is enabled, vector 0 does not handle
6436 * rx or tx interrupts. Don't allocate any resources for it.
6438 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6441 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
6442 TG3_RX_RCB_RING_BYTES(tp),
6443 &tnapi->rx_rcb_mapping);
6447 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6453 tg3_free_consistent(tp);
6457 #define MAX_WAIT_CNT 1000
6459 /* To stop a block, clear the enable bit and poll till it
6460 * clears. tp->lock is held.
6462 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
6467 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
6474 /* We can't enable/disable these bits of the
6475 * 5705/5750, just say success.
6488 for (i = 0; i < MAX_WAIT_CNT; i++) {
6491 if ((val & enable_bit) == 0)
6495 if (i == MAX_WAIT_CNT && !silent) {
6496 dev_err(&tp->pdev->dev,
6497 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6505 /* tp->lock is held. */
6506 static int tg3_abort_hw(struct tg3 *tp, int silent)
6510 tg3_disable_ints(tp);
6512 tp->rx_mode &= ~RX_MODE_ENABLE;
6513 tw32_f(MAC_RX_MODE, tp->rx_mode);
6516 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
6517 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
6518 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
6519 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
6520 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
6521 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
6523 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
6524 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
6525 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
6526 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
6527 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
6528 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
6529 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
6531 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
6532 tw32_f(MAC_MODE, tp->mac_mode);
6535 tp->tx_mode &= ~TX_MODE_ENABLE;
6536 tw32_f(MAC_TX_MODE, tp->tx_mode);
6538 for (i = 0; i < MAX_WAIT_CNT; i++) {
6540 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
6543 if (i >= MAX_WAIT_CNT) {
6544 dev_err(&tp->pdev->dev,
6545 "%s timed out, TX_MODE_ENABLE will not clear "
6546 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6550 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
6551 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
6552 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
6554 tw32(FTQ_RESET, 0xffffffff);
6555 tw32(FTQ_RESET, 0x00000000);
6557 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
6558 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
6560 for (i = 0; i < tp->irq_cnt; i++) {
6561 struct tg3_napi *tnapi = &tp->napi[i];
6562 if (tnapi->hw_status)
6563 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6566 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
6571 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
6576 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
6577 if (apedata != APE_SEG_SIG_MAGIC)
6580 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
6581 if (!(apedata & APE_FW_STATUS_READY))
6584 /* Wait for up to 1 millisecond for APE to service previous event. */
6585 for (i = 0; i < 10; i++) {
6586 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
6589 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
6591 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6592 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
6593 event | APE_EVENT_STATUS_EVENT_PENDING);
6595 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
6597 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6603 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
6604 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
6607 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6612 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
6616 case RESET_KIND_INIT:
6617 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6618 APE_HOST_SEG_SIG_MAGIC);
6619 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6620 APE_HOST_SEG_LEN_MAGIC);
6621 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6622 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6623 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6624 APE_HOST_DRIVER_ID_MAGIC);
6625 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6626 APE_HOST_BEHAV_NO_PHYLOCK);
6628 event = APE_EVENT_STATUS_STATE_START;
6630 case RESET_KIND_SHUTDOWN:
6631 /* With the interface we are currently using,
6632 * APE does not track driver state. Wiping
6633 * out the HOST SEGMENT SIGNATURE forces
6634 * the APE to assume OS absent status.
6636 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6638 event = APE_EVENT_STATUS_STATE_UNLOAD;
6640 case RESET_KIND_SUSPEND:
6641 event = APE_EVENT_STATUS_STATE_SUSPEND;
6647 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
6649 tg3_ape_send_event(tp, event);
6652 /* tp->lock is held. */
6653 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6655 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6656 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6658 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6660 case RESET_KIND_INIT:
6661 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6665 case RESET_KIND_SHUTDOWN:
6666 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6670 case RESET_KIND_SUSPEND:
6671 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6680 if (kind == RESET_KIND_INIT ||
6681 kind == RESET_KIND_SUSPEND)
6682 tg3_ape_driver_state_change(tp, kind);
6685 /* tp->lock is held. */
6686 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6688 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
6690 case RESET_KIND_INIT:
6691 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6692 DRV_STATE_START_DONE);
6695 case RESET_KIND_SHUTDOWN:
6696 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6697 DRV_STATE_UNLOAD_DONE);
6705 if (kind == RESET_KIND_SHUTDOWN)
6706 tg3_ape_driver_state_change(tp, kind);
6709 /* tp->lock is held. */
6710 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6712 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
6714 case RESET_KIND_INIT:
6715 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6719 case RESET_KIND_SHUTDOWN:
6720 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6724 case RESET_KIND_SUSPEND:
6725 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6735 static int tg3_poll_fw(struct tg3 *tp)
6740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6741 /* Wait up to 20ms for init done. */
6742 for (i = 0; i < 200; i++) {
6743 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
6750 /* Wait for firmware initialization to complete. */
6751 for (i = 0; i < 100000; i++) {
6752 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
6753 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
6758 /* Chip might not be fitted with firmware. Some Sun onboard
6759 * parts are configured like that. So don't signal the timeout
6760 * of the above loop as an error, but do report the lack of
6761 * running firmware once.
6764 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
6765 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
6767 netdev_info(tp->dev, "No firmware running\n");
6770 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
6771 /* The 57765 A0 needs a little more
6772 * time to do some important work.
6780 /* Save PCI command register before chip reset */
6781 static void tg3_save_pci_state(struct tg3 *tp)
6783 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
6786 /* Restore PCI state after chip reset */
6787 static void tg3_restore_pci_state(struct tg3 *tp)
6791 /* Re-enable indirect register accesses. */
6792 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6793 tp->misc_host_ctrl);
6795 /* Set MAX PCI retry to zero. */
6796 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
6797 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
6798 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
6799 val |= PCISTATE_RETRY_SAME_DMA;
6800 /* Allow reads and writes to the APE register and memory space. */
6801 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
6802 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
6803 PCISTATE_ALLOW_APE_SHMEM_WR |
6804 PCISTATE_ALLOW_APE_PSPACE_WR;
6805 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
6807 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
6809 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6810 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6811 pcie_set_readrq(tp->pdev, 4096);
6813 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6814 tp->pci_cacheline_sz);
6815 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
6820 /* Make sure PCI-X relaxed ordering bit is clear. */
6821 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
6824 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6826 pcix_cmd &= ~PCI_X_CMD_ERO;
6827 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
6831 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
6833 /* Chip reset on 5780 will reset MSI enable bit,
6834 * so we need to restore it.
6836 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
6839 pci_read_config_word(tp->pdev,
6840 tp->msi_cap + PCI_MSI_FLAGS,
6842 pci_write_config_word(tp->pdev,
6843 tp->msi_cap + PCI_MSI_FLAGS,
6844 ctrl | PCI_MSI_FLAGS_ENABLE);
6845 val = tr32(MSGINT_MODE);
6846 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
6851 static void tg3_stop_fw(struct tg3 *);
6853 /* tp->lock is held. */
6854 static int tg3_chip_reset(struct tg3 *tp)
6857 void (*write_op)(struct tg3 *, u32, u32);
6862 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
6864 /* No matching tg3_nvram_unlock() after this because
6865 * chip reset below will undo the nvram lock.
6867 tp->nvram_lock_cnt = 0;
6869 /* GRC_MISC_CFG core clock reset will clear the memory
6870 * enable bit in PCI register 4 and the MSI enable bit
6871 * on some chips, so we save relevant registers here.
6873 tg3_save_pci_state(tp);
6875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
6876 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
6877 tw32(GRC_FASTBOOT_PC, 0);
6880 * We must avoid the readl() that normally takes place.
6881 * It locks machines, causes machine checks, and other
6882 * fun things. So, temporarily disable the 5701
6883 * hardware workaround, while we do the reset.
6885 write_op = tp->write32;
6886 if (write_op == tg3_write_flush_reg32)
6887 tp->write32 = tg3_write32;
6889 /* Prevent the irq handler from reading or writing PCI registers
6890 * during chip reset when the memory enable bit in the PCI command
6891 * register may be cleared. The chip does not generate interrupts
6892 * at this time, but the irq handler may still be called due to irq
6893 * sharing or irqpoll.
6895 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
6896 for (i = 0; i < tp->irq_cnt; i++) {
6897 struct tg3_napi *tnapi = &tp->napi[i];
6898 if (tnapi->hw_status) {
6899 tnapi->hw_status->status = 0;
6900 tnapi->hw_status->status_tag = 0;
6902 tnapi->last_tag = 0;
6903 tnapi->last_irq_tag = 0;
6907 for (i = 0; i < tp->irq_cnt; i++)
6908 synchronize_irq(tp->napi[i].irq_vec);
6910 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6911 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
6912 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
6916 val = GRC_MISC_CFG_CORECLK_RESET;
6918 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6919 if (tr32(0x7e2c) == 0x60) {
6922 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6923 tw32(GRC_MISC_CFG, (1 << 29));
6928 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6929 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
6930 tw32(GRC_VCPU_EXT_CTRL,
6931 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
6934 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6935 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
6936 tw32(GRC_MISC_CFG, val);
6938 /* restore 5701 hardware bug workaround write method */
6939 tp->write32 = write_op;
6941 /* Unfortunately, we have to delay before the PCI read back.
6942 * Some 575X chips will not even respond to a PCI cfg access
6943 * when the reset command is given to the chip.
6945 * How do these hardware designers expect things to work
6946 * properly if the PCI write is posted for a long period
6947 * of time? It is always necessary to have some method by
6948 * which a register read-back can occur to push out the
6949 * posted write that performs the reset.
6951 * For most tg3 variants the trick below has worked.
6956 /* Flush PCI posted writes. The normal MMIO registers
6957 * are inaccessible at this time so this is the only
6958 * way to do this reliably (actually, this is no longer
6959 * the case, see above). I tried to use indirect
6960 * register read/write but this upset some 5701 variants.
6962 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
6966 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && tp->pcie_cap) {
6969 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6973 /* Wait for link training to complete. */
6974 for (i = 0; i < 5000; i++)
6977 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6978 pci_write_config_dword(tp->pdev, 0xc4,
6979 cfg_val | (1 << 15));
6982 /* Clear the "no snoop" and "relaxed ordering" bits. */
6983 pci_read_config_word(tp->pdev,
6984 tp->pcie_cap + PCI_EXP_DEVCTL,
6986 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
6987 PCI_EXP_DEVCTL_NOSNOOP_EN);
6989 * Older PCIe devices only support the 128 byte
6990 * MPS setting. Enforce the restriction.
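/* Illustrative note: clearing the PCI_EXP_DEVCTL_PAYLOAD field selects the
 * smallest encoding, i.e. a 128 byte max payload size, which every PCIe
 * device is required to support.
 */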
6992 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
6993 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784))
6994 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
6995 pci_write_config_word(tp->pdev,
6996 tp->pcie_cap + PCI_EXP_DEVCTL,
6999 pcie_set_readrq(tp->pdev, 4096);
7001 /* Clear error status */
7002 pci_write_config_word(tp->pdev,
7003 tp->pcie_cap + PCI_EXP_DEVSTA,
7004 PCI_EXP_DEVSTA_CED |
7005 PCI_EXP_DEVSTA_NFED |
7006 PCI_EXP_DEVSTA_FED |
7007 PCI_EXP_DEVSTA_URD);
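/* Illustrative note: the PCI_EXP_DEVSTA error bits are write-one-to-clear,
 * so writing back the correctable/non-fatal/fatal/unsupported-request bits
 * clears any error status left over from the reset.
 */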
7010 tg3_restore_pci_state(tp);
7012 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
7015 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
7016 val = tr32(MEMARB_MODE);
7017 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7019 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7021 tw32(0x5000, 0x400);
7024 tw32(GRC_MODE, tp->grc_mode);
7026 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7029 tw32(0xc4, val | (1 << 15));
7032 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7034 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7035 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7036 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7037 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7040 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7041 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7042 tw32_f(MAC_MODE, tp->mac_mode);
7043 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7044 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7045 tw32_f(MAC_MODE, tp->mac_mode);
7046 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7047 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7048 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7049 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7050 tw32_f(MAC_MODE, tp->mac_mode);
7052 tw32_f(MAC_MODE, 0);
7055 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7057 err = tg3_poll_fw(tp);
7063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7066 phy_addr = tp->phy_addr;
7067 tp->phy_addr = TG3_PHY_PCIE_ADDR;
7069 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7070 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
7071 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
7072 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
7073 TG3_PCIEPHY_TX0CTRL1_NB_EN;
7074 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
7077 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
7078 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
7079 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
7080 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
7081 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
7084 tp->phy_addr = phy_addr;
7087 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
7088 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7089 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7090 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7091 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
7092 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
7095 tw32(0x7c00, val | (1 << 25));
7098 /* Reprobe ASF enable state. */
7099 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
7100 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
7101 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7102 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7105 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7106 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7107 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7108 tp->last_event_jiffies = jiffies;
7109 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
7110 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7117 /* tp->lock is held. */
7118 static void tg3_stop_fw(struct tg3 *tp)
7120 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7121 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7122 /* Wait for RX cpu to ACK the previous event. */
7123 tg3_wait_for_event_ack(tp);
7125 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7127 tg3_generate_fw_event(tp);
7129 /* Wait for RX cpu to ACK this event. */
7130 tg3_wait_for_event_ack(tp);
7134 /* tp->lock is held. */
7135 static int tg3_halt(struct tg3 *tp, int kind, int silent)
7141 tg3_write_sig_pre_reset(tp, kind);
7143 tg3_abort_hw(tp, silent);
7144 err = tg3_chip_reset(tp);
7146 __tg3_set_mac_addr(tp, 0);
7148 tg3_write_sig_legacy(tp, kind);
7149 tg3_write_sig_post_reset(tp, kind);
7157 #define RX_CPU_SCRATCH_BASE 0x30000
7158 #define RX_CPU_SCRATCH_SIZE 0x04000
7159 #define TX_CPU_SCRATCH_BASE 0x34000
7160 #define TX_CPU_SCRATCH_SIZE 0x04000
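/* Illustrative memory map implied by the constants above: the RX CPU scratch
 * area spans 0x30000-0x33fff and the TX CPU scratch area spans
 * 0x34000-0x37fff in NIC-local SRAM, 16 KB each.
 */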
7162 /* tp->lock is held. */
7163 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7167 BUG_ON(offset == TX_CPU_BASE &&
7168 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
7170 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7171 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7173 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7176 if (offset == RX_CPU_BASE) {
7177 for (i = 0; i < 10000; i++) {
7178 tw32(offset + CPU_STATE, 0xffffffff);
7179 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7180 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7184 tw32(offset + CPU_STATE, 0xffffffff);
7185 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
7188 for (i = 0; i < 10000; i++) {
7189 tw32(offset + CPU_STATE, 0xffffffff);
7190 tw32(offset + CPU_MODE, CPU_MODE_HALT);
7191 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7197 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7198 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7202 /* Clear firmware's nvram arbitration. */
7203 if (tp->tg3_flags & TG3_FLAG_NVRAM)
7204 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7208 struct fw_info {
7209 unsigned int fw_base;
7210 unsigned int fw_len;
7211 const __be32 *fw_data;
7212 };
7214 /* tp->lock is held. */
7215 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
7216 int cpu_scratch_size, struct fw_info *info)
7218 int err, lock_err, i;
7219 void (*write_op)(struct tg3 *, u32, u32);
7221 if (cpu_base == TX_CPU_BASE &&
7222 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7224 "%s: Trying to load TX cpu firmware which is 5705\n",
7229 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7230 write_op = tg3_write_mem;
7232 write_op = tg3_write_indirect_reg32;
7234 /* It is possible that bootcode is still loading at this point.
7235 * Get the nvram lock first before halting the cpu.
7237 lock_err = tg3_nvram_lock(tp);
7238 err = tg3_halt_cpu(tp, cpu_base);
7240 tg3_nvram_unlock(tp);
7244 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
7245 write_op(tp, cpu_scratch_base + i, 0);
7246 tw32(cpu_base + CPU_STATE, 0xffffffff);
7247 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
7248 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
7249 write_op(tp, (cpu_scratch_base +
7250 (info->fw_base & 0xffff) +
7252 be32_to_cpu(info->fw_data[i]));
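/* Illustrative summary: the code above keeps the CPU halted, zeroes its
 * scratch area, then copies the firmware image one 32-bit word at a time,
 * converting each big-endian word with be32_to_cpu() and placing it at the
 * scratch offset given by the low 16 bits of fw_base.
 */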
7260 /* tp->lock is held. */
7261 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7263 struct fw_info info;
7264 const __be32 *fw_data;
7267 fw_data = (void *)tp->fw->data;
7269 /* Firmware blob starts with version numbers, followed by
7270 start address and length. We are setting complete length.
7271 length = end_address_of_bss - start_address_of_text.
7272 Remainder is the blob to be loaded contiguously
7273 from start address. */
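/* Illustrative layout of the blob described above (a sketch; offsets follow
 * from the indexing used below):
 *
 *   fw_data[0]   version
 *   fw_data[1]   start (load) address        -> info.fw_base
 *   fw_data[2]   full length, text + bss     -> tp->fw_len elsewhere
 *   fw_data[3..] image loaded contiguously at the start address
 *
 * so the loadable payload that follows the 12-byte header is
 * tp->fw->size - 12 bytes.
 */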
7275 info.fw_base = be32_to_cpu(fw_data[1]);
7276 info.fw_len = tp->fw->size - 12;
7277 info.fw_data = &fw_data[3];
7279 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7280 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7285 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7286 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7291 /* Now startup only the RX cpu. */
7292 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7293 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7295 for (i = 0; i < 5; i++) {
7296 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7298 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7299 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
7300 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7304 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7305 "should be %08x\n", __func__,
7306 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7309 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7310 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
7315 /* 5705 needs a special version of the TSO firmware. */
7317 /* tp->lock is held. */
7318 static int tg3_load_tso_firmware(struct tg3 *tp)
7320 struct fw_info info;
7321 const __be32 *fw_data;
7322 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
7325 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7328 fw_data = (void *)tp->fw->data;
7330 /* Firmware blob starts with version numbers, followed by
7331 start address and length. We are setting complete length.
7332 length = end_address_of_bss - start_address_of_text.
7333 Remainder is the blob to be loaded contiguously
7334 from start address. */
7336 info.fw_base = be32_to_cpu(fw_data[1]);
7337 cpu_scratch_size = tp->fw_len;
7338 info.fw_len = tp->fw->size - 12;
7339 info.fw_data = &fw_data[3];
7341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7342 cpu_base = RX_CPU_BASE;
7343 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
7345 cpu_base = TX_CPU_BASE;
7346 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
7347 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
7350 err = tg3_load_firmware_cpu(tp, cpu_base,
7351 cpu_scratch_base, cpu_scratch_size,
7356 /* Now startup the cpu. */
7357 tw32(cpu_base + CPU_STATE, 0xffffffff);
7358 tw32_f(cpu_base + CPU_PC, info.fw_base);
7360 for (i = 0; i < 5; i++) {
7361 if (tr32(cpu_base + CPU_PC) == info.fw_base)
7363 tw32(cpu_base + CPU_STATE, 0xffffffff);
7364 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7365 tw32_f(cpu_base + CPU_PC, info.fw_base);
7370 "%s fails to set CPU PC, is %08x should be %08x\n",
7371 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7374 tw32(cpu_base + CPU_STATE, 0xffffffff);
7375 tw32_f(cpu_base + CPU_MODE, 0x00000000);
7380 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7382 struct tg3 *tp = netdev_priv(dev);
7383 struct sockaddr *addr = p;
7384 int err = 0, skip_mac_1 = 0;
7386 if (!is_valid_ether_addr(addr->sa_data))
7389 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7391 if (!netif_running(dev))
7394 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7395 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7397 addr0_high = tr32(MAC_ADDR_0_HIGH);
7398 addr0_low = tr32(MAC_ADDR_0_LOW);
7399 addr1_high = tr32(MAC_ADDR_1_HIGH);
7400 addr1_low = tr32(MAC_ADDR_1_LOW);
7402 /* Skip MAC addr 1 if ASF is using it. */
7403 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7404 !(addr1_high == 0 && addr1_low == 0))
7407 spin_lock_bh(&tp->lock);
7408 __tg3_set_mac_addr(tp, skip_mac_1);
7409 spin_unlock_bh(&tp->lock);
7414 /* tp->lock is held. */
7415 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7416 dma_addr_t mapping, u32 maxlen_flags,
7420 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7421 ((u64) mapping >> 32));
7423 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7424 ((u64) mapping & 0xffffffff));
7426 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7429 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7431 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7435 static void __tg3_set_rx_mode(struct net_device *);
7436 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7440 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)) {
7441 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7442 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7443 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7445 tw32(HOSTCC_TXCOL_TICKS, 0);
7446 tw32(HOSTCC_TXMAX_FRAMES, 0);
7447 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
7450 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
7451 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7452 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7453 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7455 tw32(HOSTCC_RXCOL_TICKS, 0);
7456 tw32(HOSTCC_RXMAX_FRAMES, 0);
7457 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
7460 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7461 u32 val = ec->stats_block_coalesce_usecs;
7463 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7464 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7466 if (!netif_carrier_ok(tp->dev))
7469 tw32(HOSTCC_STAT_COAL_TICKS, val);
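/* Illustrative note: each extra MSI-X vector has its own block of coalescing
 * registers, 0x18 bytes apart, starting at the *_VEC1 offsets.  The loop
 * below programs vectors 1..irq_cnt-1 and the one after it zeroes the
 * remaining, unused vector slots.
 */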
7472 for (i = 0; i < tp->irq_cnt - 1; i++) {
7475 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
7476 tw32(reg, ec->rx_coalesce_usecs);
7477 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
7478 tw32(reg, ec->rx_max_coalesced_frames);
7479 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
7480 tw32(reg, ec->rx_max_coalesced_frames_irq);
7482 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7483 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
7484 tw32(reg, ec->tx_coalesce_usecs);
7485 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
7486 tw32(reg, ec->tx_max_coalesced_frames);
7487 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
7488 tw32(reg, ec->tx_max_coalesced_frames_irq);
7492 for (; i < tp->irq_max - 1; i++) {
7493 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
7494 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
7495 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7497 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) {
7498 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
7499 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
7500 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
7505 /* tp->lock is held. */
7506 static void tg3_rings_reset(struct tg3 *tp)
7509 u32 stblk, txrcb, rxrcb, limit;
7510 struct tg3_napi *tnapi = &tp->napi[0];
7512 /* Disable all transmit rings but the first. */
7513 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7514 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
7515 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7516 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
7518 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7520 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
7521 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
7522 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
7523 BDINFO_FLAGS_DISABLED);
7526 /* Disable all receive return rings but the first. */
7527 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7528 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7529 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
7530 else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7531 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
7532 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7533 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7534 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
7536 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7538 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
7539 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
7540 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
7541 BDINFO_FLAGS_DISABLED);
7543 /* Disable interrupts */
7544 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
7546 /* Zero mailbox registers. */
7547 if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
7548 for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
7549 tp->napi[i].tx_prod = 0;
7550 tp->napi[i].tx_cons = 0;
7551 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
7552 tw32_mailbox(tp->napi[i].prodmbox, 0);
7553 tw32_rx_mbox(tp->napi[i].consmbox, 0);
7554 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
7556 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
7557 tw32_mailbox(tp->napi[0].prodmbox, 0);
7559 tp->napi[0].tx_prod = 0;
7560 tp->napi[0].tx_cons = 0;
7561 tw32_mailbox(tp->napi[0].prodmbox, 0);
7562 tw32_rx_mbox(tp->napi[0].consmbox, 0);
7565 /* Make sure the NIC-based send BD rings are disabled. */
7566 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7567 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
7568 for (i = 0; i < 16; i++)
7569 tw32_tx_mbox(mbox + i * 8, 0);
7572 txrcb = NIC_SRAM_SEND_RCB;
7573 rxrcb = NIC_SRAM_RCV_RET_RCB;
7575 /* Clear status block in ram. */
7576 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7578 /* Set status block DMA address */
7579 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7580 ((u64) tnapi->status_mapping >> 32));
7581 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7582 ((u64) tnapi->status_mapping & 0xffffffff));
7584 if (tnapi->tx_ring) {
7585 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7586 (TG3_TX_RING_SIZE <<
7587 BDINFO_FLAGS_MAXLEN_SHIFT),
7588 NIC_SRAM_TX_BUFFER_DESC);
7589 txrcb += TG3_BDINFO_SIZE;
7592 if (tnapi->rx_rcb) {
7593 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7594 (TG3_RX_RCB_RING_SIZE(tp) <<
7595 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7596 rxrcb += TG3_BDINFO_SIZE;
7599 stblk = HOSTCC_STATBLCK_RING1;
7601 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
7602 u64 mapping = (u64)tnapi->status_mapping;
7603 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
7604 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
7606 /* Clear status block in ram. */
7607 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7609 if (tnapi->tx_ring) {
7610 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
7611 (TG3_TX_RING_SIZE <<
7612 BDINFO_FLAGS_MAXLEN_SHIFT),
7613 NIC_SRAM_TX_BUFFER_DESC);
7614 txrcb += TG3_BDINFO_SIZE;
7617 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
7618 (TG3_RX_RCB_RING_SIZE(tp) <<
7619 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
7622 rxrcb += TG3_BDINFO_SIZE;
7626 /* tp->lock is held. */
7627 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7629 u32 val, rdmac_mode;
7631 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
7633 tg3_disable_ints(tp);
7637 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7639 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7640 tg3_abort_hw(tp, 1);
7645 err = tg3_chip_reset(tp);
7649 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7651 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7652 val = tr32(TG3_CPMU_CTRL);
7653 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7654 tw32(TG3_CPMU_CTRL, val);
7656 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7657 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7658 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7659 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7661 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7662 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7663 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7664 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7666 val = tr32(TG3_CPMU_HST_ACC);
7667 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7668 val |= CPMU_HST_ACC_MACCLK_6_25;
7669 tw32(TG3_CPMU_HST_ACC, val);
7672 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7673 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7674 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7675 PCIE_PWR_MGMT_L1_THRESH_4MS;
7676 tw32(PCIE_PWR_MGMT_THRESH, val);
7678 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7679 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7681 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7683 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7684 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7687 if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
7688 u32 grc_mode = tr32(GRC_MODE);
7690 /* Access the lower 1K of PL PCIE block registers. */
7691 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7692 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7694 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7695 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7696 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7698 tw32(GRC_MODE, grc_mode);
7701 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7702 u32 grc_mode = tr32(GRC_MODE);
7704 /* Access the lower 1K of PL PCIE block registers. */
7705 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7706 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7708 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7709 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7710 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7712 tw32(GRC_MODE, grc_mode);
7714 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7715 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7716 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7717 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7720 /* This works around an issue with Athlon chipsets on
7721 * B3 tigon3 silicon. This bit has no effect on any
7722 * other revision. But do not set this on PCI Express
7723 * chips and don't even touch the clocks if the CPMU is present.
7725 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7726 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7727 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7728 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7731 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7732 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7733 val = tr32(TG3PCI_PCISTATE);
7734 val |= PCISTATE_RETRY_SAME_DMA;
7735 tw32(TG3PCI_PCISTATE, val);
7738 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7739 /* Allow reads and writes to the
7740 * APE register and memory space.
7742 val = tr32(TG3PCI_PCISTATE);
7743 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7744 PCISTATE_ALLOW_APE_SHMEM_WR |
7745 PCISTATE_ALLOW_APE_PSPACE_WR;
7746 tw32(TG3PCI_PCISTATE, val);
7749 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7750 /* Enable some hw fixes. */
7751 val = tr32(TG3PCI_MSI_DATA);
7752 val |= (1 << 26) | (1 << 28) | (1 << 29);
7753 tw32(TG3PCI_MSI_DATA, val);
7756 /* Descriptor ring init may make accesses to the
7757 * NIC SRAM area to setup the TX descriptors, so we
7758 * can only do this after the hardware has been
7759 * successfully reset.
7761 err = tg3_init_rings(tp);
7765 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7766 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
7767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7768 val = tr32(TG3PCI_DMA_RW_CTRL) &
7769 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7770 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7771 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7772 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7773 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7774 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7775 /* This value is determined during the probe time DMA
7776 * engine test, tg3_test_dma.
7778 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7781 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7782 GRC_MODE_4X_NIC_SEND_RINGS |
7783 GRC_MODE_NO_TX_PHDR_CSUM |
7784 GRC_MODE_NO_RX_PHDR_CSUM);
7785 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7787 /* Pseudo-header checksum is done by hardware logic and not
7788 * the offload processors, so make the chip do the pseudo-
7789 * header checksums on receive. For transmit it is more
7790 * convenient to do the pseudo-header checksum in software
7791 * as Linux does that on transmit for us in all cases.
7793 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7797 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7799 /* Set up the timer prescaler register.  Clock is always 66 MHz. */
7800 val = tr32(GRC_MISC_CFG);
7802 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7803 tw32(GRC_MISC_CFG, val);
7805 /* Initialize MBUF/DESC pool. */
7806 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7808 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7809 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7810 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7811 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7813 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7814 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7815 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7816 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7819 fw_len = tp->fw_len;
7820 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7821 tw32(BUFMGR_MB_POOL_ADDR,
7822 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7823 tw32(BUFMGR_MB_POOL_SIZE,
7824 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7827 if (tp->dev->mtu <= ETH_DATA_LEN) {
7828 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7829 tp->bufmgr_config.mbuf_read_dma_low_water);
7830 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7831 tp->bufmgr_config.mbuf_mac_rx_low_water);
7832 tw32(BUFMGR_MB_HIGH_WATER,
7833 tp->bufmgr_config.mbuf_high_water);
7835 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7836 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7837 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7838 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7839 tw32(BUFMGR_MB_HIGH_WATER,
7840 tp->bufmgr_config.mbuf_high_water_jumbo);
7842 tw32(BUFMGR_DMA_LOW_WATER,
7843 tp->bufmgr_config.dma_low_water);
7844 tw32(BUFMGR_DMA_HIGH_WATER,
7845 tp->bufmgr_config.dma_high_water);
7847 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7848 for (i = 0; i < 2000; i++) {
7849 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7854 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
7858 /* Setup replenish threshold. */
7859 val = tp->rx_pending / 8;
7862 else if (val > tp->rx_std_max_post)
7863 val = tp->rx_std_max_post;
7864 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7865 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7866 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7868 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7869 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7872 tw32(RCVBDI_STD_THRESH, val);
7874 /* Initialize TG3_BDINFO's at:
7875 * RCVDBDI_STD_BD: standard eth size rx ring
7876 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7877 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7880 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7881 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7882 * ring attribute flags
7883 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7885 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7886 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7888 * The size of each ring is fixed in the firmware, but the location is
7889 * configurable.
7890 */
7891 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7892 ((u64) tpr->rx_std_mapping >> 32));
7893 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7894 ((u64) tpr->rx_std_mapping & 0xffffffff));
7895 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
7896 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
7897 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7898 NIC_SRAM_RX_BUFFER_DESC);
7900 /* Disable the mini ring */
7901 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7902 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7903 BDINFO_FLAGS_DISABLED);
7905 /* Program the jumbo buffer descriptor ring control
7906 * blocks on those devices that have them.
7908 if ((tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) &&
7909 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
7910 /* Setup replenish threshold. */
7911 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7913 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7914 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7915 ((u64) tpr->rx_jmb_mapping >> 32));
7916 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7917 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
7918 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7919 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7920 BDINFO_FLAGS_USE_EXT_RECV);
7921 if (!(tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) ||
7922 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7923 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7924 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7926 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7927 BDINFO_FLAGS_DISABLED);
7930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
7932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7933 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7934 (TG3_RX_STD_DMA_SZ << 2);
7936 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7938 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7940 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7942 tpr->rx_std_prod_idx = tp->rx_pending;
7943 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7945 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7946 tp->rx_jumbo_pending : 0;
7947 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
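/* Illustrative note: the two mailbox writes above publish the initial
 * producer indices, telling the chip how many standard and jumbo rx buffers
 * the driver has already posted to the rings.
 */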
7949 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
7951 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7952 tw32(STD_REPLENISH_LWM, 32);
7953 tw32(JMB_REPLENISH_LWM, 16);
7956 tg3_rings_reset(tp);
7958 /* Initialize MAC address and backoff seed. */
7959 __tg3_set_mac_addr(tp, 0);
7961 /* MTU + ethernet header + FCS + optional VLAN tag */
7962 tw32(MAC_RX_MTU_SIZE,
7963 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
7965 /* The slot time is changed by tg3_setup_phy if we
7966 * run at gigabit with half duplex.
7968 tw32(MAC_TX_LENGTHS,
7969 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7970 (6 << TX_LENGTHS_IPG_SHIFT) |
7971 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7973 /* Receive rules. */
7974 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7975 tw32(RCVLPC_CONFIG, 0x0181);
7977 /* Calculate RDMAC_MODE setting early, we need it to determine
7978 * the RCVLPC_STATE_ENABLE mask.
7980 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7981 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7982 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7983 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7984 RDMAC_MODE_LNGREAD_ENAB);
7986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7987 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
7988 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
7990 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7991 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7992 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7993 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7994 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7995 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7997 /* If statement applies to 5705 and 5750 PCI devices only */
7998 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7999 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8000 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
8001 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
8002 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8003 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8004 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8005 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
8006 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8010 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
8011 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8013 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8014 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8016 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
8017 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8018 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8019 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8021 /* Receive/send statistics. */
8022 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
8023 val = tr32(RCVLPC_STATS_ENABLE);
8024 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8025 tw32(RCVLPC_STATS_ENABLE, val);
8026 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8027 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8028 val = tr32(RCVLPC_STATS_ENABLE);
8029 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8030 tw32(RCVLPC_STATS_ENABLE, val);
8032 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8034 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8035 tw32(SNDDATAI_STATSENAB, 0xffffff);
8036 tw32(SNDDATAI_STATSCTRL,
8037 (SNDDATAI_SCTRL_ENABLE |
8038 SNDDATAI_SCTRL_FASTUPD));
8040 /* Setup host coalescing engine. */
8041 tw32(HOSTCC_MODE, 0);
8042 for (i = 0; i < 2000; i++) {
8043 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8048 __tg3_set_coalesce(tp, &tp->coal);
8050 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8051 /* Status/statistics block address. See tg3_timer,
8052 * the tg3_periodic_fetch_stats call there, and
8053 * tg3_get_stats to see how this works for 5705/5750 chips.
8055 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8056 ((u64) tp->stats_mapping >> 32));
8057 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8058 ((u64) tp->stats_mapping & 0xffffffff));
8059 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8061 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8063 /* Clear statistics and status block memory areas */
8064 for (i = NIC_SRAM_STATS_BLK;
8065 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8067 tg3_write_mem(tp, i, 0);
8072 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8074 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8075 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8076 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8077 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8079 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
8080 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
8081 /* reset to prevent losing 1st rx packet intermittently */
8082 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8086 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8087 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8090 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8091 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
8092 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8093 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8094 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8095 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8096 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8099 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8100 * If TG3_FLG2_IS_NIC is zero, we should read the
8101 * register to preserve the GPIO settings for LOMs. The GPIOs,
8102 * whether used as inputs or outputs, are set by boot code after
8103 * reset.
8104 */
8105 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
8108 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8109 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8110 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8113 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8114 GRC_LCLCTRL_GPIO_OUTPUT3;
8116 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8117 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8119 tp->grc_local_ctrl &= ~gpio_mask;
8120 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8122 /* GPIO1 must be driven high for eeprom write protect */
8123 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
8124 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8125 GRC_LCLCTRL_GPIO_OUTPUT1);
8127 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8130 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
8131 val = tr32(MSGINT_MODE);
8132 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8133 tw32(MSGINT_MODE, val);
8136 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
8137 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8141 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8142 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8143 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8144 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8145 WDMAC_MODE_LNGREAD_ENAB);
8147 /* If statement applies to 5705 and 5750 PCI devices only */
8148 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8149 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
8150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
8151 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8152 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8153 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8155 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8156 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
8157 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8158 val |= WDMAC_MODE_RX_ACCEL;
8162 /* Enable host coalescing bug fix */
8163 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8164 val |= WDMAC_MODE_STATUS_TAG_FIX;
8166 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8167 val |= WDMAC_MODE_BURST_ALL_DATA;
8169 tw32_f(WDMAC_MODE, val);
8172 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
8175 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8177 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8178 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8179 pcix_cmd |= PCI_X_CMD_READ_2K;
8180 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8181 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8182 pcix_cmd |= PCI_X_CMD_READ_2K;
8184 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8188 tw32_f(RDMAC_MODE, rdmac_mode);
8191 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8192 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
8193 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8195 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8197 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8199 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8201 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8202 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8203 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
8204 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8205 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
8206 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8207 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8208 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
8209 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8210 tw32(SNDBDI_MODE, val);
8211 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8213 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8214 err = tg3_load_5701_a0_firmware_fix(tp);
8219 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
8220 err = tg3_load_tso_firmware(tp);
8225 tp->tx_mode = TX_MODE_ENABLE;
8226 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
8227 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8228 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8229 tw32_f(MAC_TX_MODE, tp->tx_mode);
8232 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
8233 u32 reg = MAC_RSS_INDIR_TBL_0;
8234 u8 *ent = (u8 *)&val;
8236 /* Setup the indirection table */
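/* Illustrative note: the table holds TG3_RSS_INDIR_TBL_SIZE one-byte
 * entries mapping hash buckets to rx return rings.  Ring 0 is left to the
 * default vector, so the entries take values 1..(irq_cnt - 1), spreading
 * buckets round-robin over the remaining rings, and four entries at a time
 * are packed into each 32-bit register starting at MAC_RSS_INDIR_TBL_0.
 */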
8237 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8238 int idx = i % sizeof(val);
8240 ent[idx] = (i % (tp->irq_cnt - 1)) + 1;
8241 if (idx == sizeof(val) - 1) {
8247 /* Setup the "secret" hash key. */
8248 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8249 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8250 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8251 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8252 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8253 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8254 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8255 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8256 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8257 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8260 tp->rx_mode = RX_MODE_ENABLE;
8261 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
8262 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8264 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
8265 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8266 RX_MODE_RSS_ITBL_HASH_BITS_7 |
8267 RX_MODE_RSS_IPV6_HASH_EN |
8268 RX_MODE_RSS_TCP_IPV6_HASH_EN |
8269 RX_MODE_RSS_IPV4_HASH_EN |
8270 RX_MODE_RSS_TCP_IPV4_HASH_EN;
8272 tw32_f(MAC_RX_MODE, tp->rx_mode);
8275 tw32(MAC_LED_CTRL, tp->led_ctrl);
8277 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8278 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8279 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8282 tw32_f(MAC_RX_MODE, tp->rx_mode);
8285 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8286 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8287 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
8288 /* Set drive transmission level to 1.2V */
8289 /* only if the signal pre-emphasis bit is not set */
8290 val = tr32(MAC_SERDES_CFG);
8293 tw32(MAC_SERDES_CFG, val);
8295 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8296 tw32(MAC_SERDES_CFG, 0x616000);
8299 /* Prevent chip from dropping frames when flow control
8300 * is enabled.
8301 */
8302 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8306 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8309 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8310 /* Use hardware link auto-negotiation */
8311 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
8314 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8315 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
8318 tmp = tr32(SERDES_RX_CTRL);
8319 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8320 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8321 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8322 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8325 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
8326 if (tp->link_config.phy_is_low_power) {
8327 tp->link_config.phy_is_low_power = 0;
8328 tp->link_config.speed = tp->link_config.orig_speed;
8329 tp->link_config.duplex = tp->link_config.orig_duplex;
8330 tp->link_config.autoneg = tp->link_config.orig_autoneg;
8333 err = tg3_setup_phy(tp, 0);
8337 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8338 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET)) {
8341 /* Clear CRC stats. */
8342 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8343 tg3_writephy(tp, MII_TG3_TEST1,
8344 tmp | MII_TG3_TEST1_CRC_EN);
8345 tg3_readphy(tp, 0x14, &tmp);
8350 __tg3_set_rx_mode(tp->dev);
8352 /* Initialize receive rules. */
8353 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
8354 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8355 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
8356 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8358 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
8359 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
8363 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
8367 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
8369 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
8371 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
8373 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
8375 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
8377 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
8379 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
8381 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
8383 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
8385 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
8387 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
8389 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
8391 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
8393 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
8401 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8402 /* Write our heartbeat update interval to APE. */
8403 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8404 APE_HOST_HEARTBEAT_INT_DISABLE);
8406 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8411 /* Called at device open time to get the chip ready for
8412 * packet processing. Invoked with tp->lock held.
8414 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
8416 tg3_switch_clocks(tp);
8418 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8420 return tg3_reset_hw(tp, reset_phy);
8423 #define TG3_STAT_ADD32(PSTAT, REG) \
8424 do { u32 __val = tr32(REG); \
8425 (PSTAT)->low += __val; \
8426 if ((PSTAT)->low < __val) \
8427 (PSTAT)->high += 1; \
8428 } while (0)
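/* Illustrative note: the hardware statistics registers read by
 * tg3_periodic_fetch_stats() below are only 32 bits wide, so the macro
 * above adds each reading into a 64-bit tg3_stat64_t software counter,
 * carrying into the high word whenever the low-word addition wraps.
 */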
8430 static void tg3_periodic_fetch_stats(struct tg3 *tp)
8432 struct tg3_hw_stats *sp = tp->hw_stats;
8434 if (!netif_carrier_ok(tp->dev))
8437 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
8438 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
8439 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
8440 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
8441 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
8442 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
8443 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
8444 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
8445 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
8446 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
8447 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
8448 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
8449 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
8451 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
8452 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
8453 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
8454 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
8455 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
8456 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
8457 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
8458 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
8459 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
8460 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
8461 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
8462 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
8463 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
8464 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
8466 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
8467 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
8468 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
8471 static void tg3_timer(unsigned long __opaque)
8473 struct tg3 *tp = (struct tg3 *) __opaque;
8478 spin_lock(&tp->lock);
8480 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8481 /* All of this garbage is because, when using non-tagged
8482 * IRQ status, the mailbox/status_block protocol the chip
8483 * uses with the cpu is race prone.
8485 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
8486 tw32(GRC_LOCAL_CTRL,
8487 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
8489 tw32(HOSTCC_MODE, tp->coalesce_mode |
8490 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
8493 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8494 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
8495 spin_unlock(&tp->lock);
8496 schedule_work(&tp->reset_task);
8501 /* This part only runs once per second. */
8502 if (!--tp->timer_counter) {
8503 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
8504 tg3_periodic_fetch_stats(tp);
8506 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
8510 mac_stat = tr32(MAC_STATUS);
8513 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
8514 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
8516 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
8520 tg3_setup_phy(tp, 0);
8521 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
8522 u32 mac_stat = tr32(MAC_STATUS);
8525 if (netif_carrier_ok(tp->dev) &&
8526 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
8529 if (! netif_carrier_ok(tp->dev) &&
8530 (mac_stat & (MAC_STATUS_PCS_SYNCED |
8531 MAC_STATUS_SIGNAL_DET))) {
8535 if (!tp->serdes_counter) {
8538 ~MAC_MODE_PORT_MODE_MASK));
8540 tw32_f(MAC_MODE, tp->mac_mode);
8543 tg3_setup_phy(tp, 0);
8545 } else if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
8546 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
8547 tg3_serdes_parallel_detect(tp);
8550 tp->timer_counter = tp->timer_multiplier;
8553 /* Heartbeat is only sent once every 2 seconds.
8555 * The heartbeat is to tell the ASF firmware that the host
8556 * driver is still alive. In the event that the OS crashes,
8557 * ASF needs to reset the hardware to free up the FIFO space
8558 * that may be filled with rx packets destined for the host.
8559 * If the FIFO is full, ASF will no longer function properly.
8561 * Unintended resets have been reported on real time kernels
8562 * where the timer doesn't run on time.  Netpoll will also have
8563 * the same problem.
8565 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
8566 * to check the ring condition when the heartbeat is expiring
8567 * before doing the reset.  This will prevent most unintended
8568 * resets.
8569 */
8570 if (!--tp->asf_counter) {
8571 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
8572 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
8573 tg3_wait_for_event_ack(tp);
8575 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8576 FWCMD_NICDRV_ALIVE3);
8577 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8578 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8579 TG3_FW_UPDATE_TIMEOUT_SEC);
8581 tg3_generate_fw_event(tp);
8583 tp->asf_counter = tp->asf_multiplier;
8586 spin_unlock(&tp->lock);
8589 tp->timer.expires = jiffies + tp->timer_offset;
8590 add_timer(&tp->timer);
8593 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8596 unsigned long flags;
8598 struct tg3_napi *tnapi = &tp->napi[irq_num];
8600 if (tp->irq_cnt == 1)
8601 name = tp->dev->name;
8603 name = &tnapi->irq_lbl[0];
8604 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8605 name[IFNAMSIZ-1] = 0;
8608 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8610 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8612 flags = IRQF_SAMPLE_RANDOM;
8615 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8616 fn = tg3_interrupt_tagged;
8617 flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8620 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8623 static int tg3_test_interrupt(struct tg3 *tp)
8625 struct tg3_napi *tnapi = &tp->napi[0];
8626 struct net_device *dev = tp->dev;
8627 int err, i, intr_ok = 0;
8630 if (!netif_running(dev))
8633 tg3_disable_ints(tp);
8635 free_irq(tnapi->irq_vec, tnapi);
8638 * Turn off MSI one shot mode. Otherwise this test has no
8639 * observable way to know whether the interrupt was delivered.
8641 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8644 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8645 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
8646 tw32(MSGINT_MODE, val);
8649 err = request_irq(tnapi->irq_vec, tg3_test_isr,
8650 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
8654 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
8655 tg3_enable_ints(tp);
8657 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8660 for (i = 0; i < 5; i++) {
8661 u32 int_mbox, misc_host_ctrl;
8663 int_mbox = tr32_mailbox(tnapi->int_mbox);
8664 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8666 if ((int_mbox != 0) ||
8667 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8675 tg3_disable_ints(tp);
8677 free_irq(tnapi->irq_vec, tnapi);
8679 err = tg3_request_irq(tp, 0);
8685 /* Reenable MSI one shot mode. */
8686 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8687 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
8689 (tp->tg3_flags2 & TG3_FLG2_USING_MSI)) {
8690 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
8691 tw32(MSGINT_MODE, val);
8699 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
8700 * INTx mode is successfully restored.
8701 */
8702 static int tg3_test_msi(struct tg3 *tp)
8707 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8710 /* Turn off SERR reporting in case MSI terminates with Master
8713 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8714 pci_write_config_word(tp->pdev, PCI_COMMAND,
8715 pci_cmd & ~PCI_COMMAND_SERR);
8717 err = tg3_test_interrupt(tp);
8719 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8724 /* other failures */
8728 /* MSI test failed, go back to INTx mode */
8729 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8730 "to INTx mode. Please report this failure to the PCI "
8731 "maintainer and include system chipset information\n");
8733 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8735 pci_disable_msi(tp->pdev);
8737 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8738 tp->napi[0].irq_vec = tp->pdev->irq;
8740 err = tg3_request_irq(tp, 0);
8744 /* Need to reset the chip because the MSI cycle may have terminated
8745 * with Master Abort.
8747 tg3_full_lock(tp, 1);
8749 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8750 err = tg3_init_hw(tp, 1);
8752 tg3_full_unlock(tp);
8755 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8760 static int tg3_request_firmware(struct tg3 *tp)
8762 const __be32 *fw_data;
8764 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
8765 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
8770 fw_data = (void *)tp->fw->data;
8772 /* Firmware blob starts with version numbers, followed by
8773 * start address and _full_ length including BSS sections
8774 (which must be longer than the actual data, of course) */
8777 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
8778 if (tp->fw_len < (tp->fw->size - 12)) {
8779 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
8780 tp->fw_len, tp->fw_needed);
8781 release_firmware(tp->fw);
8786 /* We no longer need firmware; we have it. */
8787 tp->fw_needed = NULL;
8791 static bool tg3_enable_msix(struct tg3 *tp)
8793 int i, rc, cpus = num_online_cpus();
8794 struct msix_entry msix_ent[tp->irq_max];
8797 /* Just fallback to the simpler MSI mode. */
8801 * We want as many rx rings enabled as there are cpus.
8802 * The first MSIX vector only deals with link interrupts, etc,
8803 * so we add one to the number of vectors we are requesting.
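/* Illustrative note: with, say, 4 online CPUs and a large enough irq_max
 * this asks for 5 vectors: vector 0 for link and error events, vectors 1-4
 * for rx rings.  pci_enable_msix() may be unable to grant that many; a
 * positive return value below is the number of vectors actually available,
 * and the request is retried with that smaller count.
 */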
8805 tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
8807 for (i = 0; i < tp->irq_max; i++) {
8808 msix_ent[i].entry = i;
8809 msix_ent[i].vector = 0;
8812 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
8815 } else if (rc != 0) {
8816 if (pci_enable_msix(tp->pdev, msix_ent, rc))
8818 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
8823 for (i = 0; i < tp->irq_max; i++)
8824 tp->napi[i].irq_vec = msix_ent[i].vector;
8826 tp->dev->real_num_tx_queues = 1;
8827 if (tp->irq_cnt > 1) {
8828 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
8830 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8831 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8832 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
8833 tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
8840 static void tg3_ints_init(struct tg3 *tp)
8842 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI_OR_MSIX) &&
8843 !(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8844 /* All MSI supporting chips should support tagged
8845 * status. Assert that this is the case.
8847 netdev_warn(tp->dev,
8848 "MSI without TAGGED_STATUS? Not using MSI\n");
8852 if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) && tg3_enable_msix(tp))
8853 tp->tg3_flags2 |= TG3_FLG2_USING_MSIX;
8854 else if ((tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) &&
8855 pci_enable_msi(tp->pdev) == 0)
8856 tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8858 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
8859 u32 msi_mode = tr32(MSGINT_MODE);
8860 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8861 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
8862 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8865 if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
8867 tp->napi[0].irq_vec = tp->pdev->irq;
8868 tp->dev->real_num_tx_queues = 1;
8872 static void tg3_ints_fini(struct tg3 *tp)
8874 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
8875 pci_disable_msix(tp->pdev);
8876 else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
8877 pci_disable_msi(tp->pdev);
8878 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
8879 tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
8882 static int tg3_open(struct net_device *dev)
8884 struct tg3 *tp = netdev_priv(dev);
8887 if (tp->fw_needed) {
8888 err = tg3_request_firmware(tp);
8889 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8893 netdev_warn(tp->dev, "TSO capability disabled\n");
8894 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8895 } else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
8896 netdev_notice(tp->dev, "TSO capability restored\n");
8897 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8901 netif_carrier_off(tp->dev);
8903 err = tg3_set_power_state(tp, PCI_D0);
8907 tg3_full_lock(tp, 0);
8909 tg3_disable_ints(tp);
8910 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8912 tg3_full_unlock(tp);
8915 * Setup interrupts first so we know how
8916 * many NAPI resources to allocate
8920 /* The placement of this call is tied
8921 * to the setup and use of Host TX descriptors.
8923 err = tg3_alloc_consistent(tp);
8927 tg3_napi_enable(tp);
8929 for (i = 0; i < tp->irq_cnt; i++) {
8930 struct tg3_napi *tnapi = &tp->napi[i];
8931 err = tg3_request_irq(tp, i);
8933 for (i--; i >= 0; i--)
8934 free_irq(tp->napi[i].irq_vec, &tp->napi[i]); /* release the vectors requested so far */
8942 tg3_full_lock(tp, 0);
8944 err = tg3_init_hw(tp, 1);
8946 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8949 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8950 tp->timer_offset = HZ;
8952 tp->timer_offset = HZ / 10;
8954 BUG_ON(tp->timer_offset > HZ);
8955 tp->timer_counter = tp->timer_multiplier =
8956 (HZ / tp->timer_offset);
8957 tp->asf_counter = tp->asf_multiplier =
8958 ((HZ / tp->timer_offset) * 2);
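/* Both counter pairs are expressed in timer_offset ticks, so the periodic
 * service work effectively runs once a second and the ASF heartbeat every
 * two seconds, whether the timer itself fires at HZ or HZ / 10.
 */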
8960 init_timer(&tp->timer);
8961 tp->timer.expires = jiffies + tp->timer_offset;
8962 tp->timer.data = (unsigned long) tp;
8963 tp->timer.function = tg3_timer;
8966 tg3_full_unlock(tp);
8971 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8972 err = tg3_test_msi(tp);
8975 tg3_full_lock(tp, 0);
8976 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8978 tg3_full_unlock(tp);
8983 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
8984 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
8985 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8986 (tp->tg3_flags2 & TG3_FLG2_USING_MSI) &&
8987 (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)) {
8988 u32 val = tr32(PCIE_TRANSACTION_CFG);
8990 tw32(PCIE_TRANSACTION_CFG,
8991 val | PCIE_TRANS_CFG_1SHOT_MSI);
8997 tg3_full_lock(tp, 0);
8999 add_timer(&tp->timer);
9000 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
9001 tg3_enable_ints(tp);
9003 tg3_full_unlock(tp);
9005 netif_tx_start_all_queues(dev);
9010 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9011 struct tg3_napi *tnapi = &tp->napi[i];
9012 free_irq(tnapi->irq_vec, tnapi);
9016 tg3_napi_disable(tp);
9017 tg3_free_consistent(tp);
9024 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9025 struct rtnl_link_stats64 *);
9026 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9028 static int tg3_close(struct net_device *dev)
9031 struct tg3 *tp = netdev_priv(dev);
9033 tg3_napi_disable(tp);
9034 cancel_work_sync(&tp->reset_task);
9036 netif_tx_stop_all_queues(dev);
9038 del_timer_sync(&tp->timer);
9042 tg3_full_lock(tp, 1);
9044 tg3_disable_ints(tp);
9046 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9048 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
9050 tg3_full_unlock(tp);
9052 for (i = tp->irq_cnt - 1; i >= 0; i--) {
9053 struct tg3_napi *tnapi = &tp->napi[i];
9054 free_irq(tnapi->irq_vec, tnapi);
9059 tg3_get_stats64(tp->dev, &tp->net_stats_prev);
9061 memcpy(&tp->estats_prev, tg3_get_estats(tp),
9062 sizeof(tp->estats_prev));
9064 tg3_free_consistent(tp);
9066 tg3_set_power_state(tp, PCI_D3hot);
9068 netif_carrier_off(tp->dev);
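/* Each hardware statistics counter is kept as two 32-bit halves
 * (tg3_stat64_t); get_stat64() folds them into a single u64 for the stack.
 */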
9073 static inline u64 get_stat64(tg3_stat64_t *val)
9075 return ((u64)val->high << 32) | ((u64)val->low);
9078 static u64 calc_crc_errors(struct tg3 *tp)
9080 struct tg3_hw_stats *hw_stats = tp->hw_stats;
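/* For 5700/5701 with a copper PHY, CRC errors are accumulated in software
 * from the PHY's own counter: enable it via MII_TG3_TEST1_CRC_EN and latch
 * the count from register 0x14.  All other chips report the MAC's
 * rx_fcs_errors statistic directly.
 */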
9082 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
9083 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
9084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
9087 spin_lock_bh(&tp->lock);
9088 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
9089 tg3_writephy(tp, MII_TG3_TEST1,
9090 val | MII_TG3_TEST1_CRC_EN);
9091 tg3_readphy(tp, 0x14, &val);
9094 spin_unlock_bh(&tp->lock);
9096 tp->phy_crc_errors += val;
9098 return tp->phy_crc_errors;
9101 return get_stat64(&hw_stats->rx_fcs_errors);
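/* ESTAT_ADD() reports the live hardware counter on top of the snapshot saved
 * in estats_prev at close time, so ethtool statistics keep accumulating
 * across an ifdown/ifup cycle.
 */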
9104 #define ESTAT_ADD(member) \
9105 estats->member = old_estats->member + \
9106 get_stat64(&hw_stats->member)
9108 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
9110 struct tg3_ethtool_stats *estats = &tp->estats;
9111 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
9112 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9117 ESTAT_ADD(rx_octets);
9118 ESTAT_ADD(rx_fragments);
9119 ESTAT_ADD(rx_ucast_packets);
9120 ESTAT_ADD(rx_mcast_packets);
9121 ESTAT_ADD(rx_bcast_packets);
9122 ESTAT_ADD(rx_fcs_errors);
9123 ESTAT_ADD(rx_align_errors);
9124 ESTAT_ADD(rx_xon_pause_rcvd);
9125 ESTAT_ADD(rx_xoff_pause_rcvd);
9126 ESTAT_ADD(rx_mac_ctrl_rcvd);
9127 ESTAT_ADD(rx_xoff_entered);
9128 ESTAT_ADD(rx_frame_too_long_errors);
9129 ESTAT_ADD(rx_jabbers);
9130 ESTAT_ADD(rx_undersize_packets);
9131 ESTAT_ADD(rx_in_length_errors);
9132 ESTAT_ADD(rx_out_length_errors);
9133 ESTAT_ADD(rx_64_or_less_octet_packets);
9134 ESTAT_ADD(rx_65_to_127_octet_packets);
9135 ESTAT_ADD(rx_128_to_255_octet_packets);
9136 ESTAT_ADD(rx_256_to_511_octet_packets);
9137 ESTAT_ADD(rx_512_to_1023_octet_packets);
9138 ESTAT_ADD(rx_1024_to_1522_octet_packets);
9139 ESTAT_ADD(rx_1523_to_2047_octet_packets);
9140 ESTAT_ADD(rx_2048_to_4095_octet_packets);
9141 ESTAT_ADD(rx_4096_to_8191_octet_packets);
9142 ESTAT_ADD(rx_8192_to_9022_octet_packets);
9144 ESTAT_ADD(tx_octets);
9145 ESTAT_ADD(tx_collisions);
9146 ESTAT_ADD(tx_xon_sent);
9147 ESTAT_ADD(tx_xoff_sent);
9148 ESTAT_ADD(tx_flow_control);
9149 ESTAT_ADD(tx_mac_errors);
9150 ESTAT_ADD(tx_single_collisions);
9151 ESTAT_ADD(tx_mult_collisions);
9152 ESTAT_ADD(tx_deferred);
9153 ESTAT_ADD(tx_excessive_collisions);
9154 ESTAT_ADD(tx_late_collisions);
9155 ESTAT_ADD(tx_collide_2times);
9156 ESTAT_ADD(tx_collide_3times);
9157 ESTAT_ADD(tx_collide_4times);
9158 ESTAT_ADD(tx_collide_5times);
9159 ESTAT_ADD(tx_collide_6times);
9160 ESTAT_ADD(tx_collide_7times);
9161 ESTAT_ADD(tx_collide_8times);
9162 ESTAT_ADD(tx_collide_9times);
9163 ESTAT_ADD(tx_collide_10times);
9164 ESTAT_ADD(tx_collide_11times);
9165 ESTAT_ADD(tx_collide_12times);
9166 ESTAT_ADD(tx_collide_13times);
9167 ESTAT_ADD(tx_collide_14times);
9168 ESTAT_ADD(tx_collide_15times);
9169 ESTAT_ADD(tx_ucast_packets);
9170 ESTAT_ADD(tx_mcast_packets);
9171 ESTAT_ADD(tx_bcast_packets);
9172 ESTAT_ADD(tx_carrier_sense_errors);
9173 ESTAT_ADD(tx_discards);
9174 ESTAT_ADD(tx_errors);
9176 ESTAT_ADD(dma_writeq_full);
9177 ESTAT_ADD(dma_write_prioq_full);
9178 ESTAT_ADD(rxbds_empty);
9179 ESTAT_ADD(rx_discards);
9180 ESTAT_ADD(rx_errors);
9181 ESTAT_ADD(rx_threshold_hit);
9183 ESTAT_ADD(dma_readq_full);
9184 ESTAT_ADD(dma_read_prioq_full);
9185 ESTAT_ADD(tx_comp_queue_full);
9187 ESTAT_ADD(ring_set_send_prod_index);
9188 ESTAT_ADD(ring_status_update);
9189 ESTAT_ADD(nic_irqs);
9190 ESTAT_ADD(nic_avoided_irqs);
9191 ESTAT_ADD(nic_tx_threshold_hit);
9196 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
9197 struct rtnl_link_stats64 *stats)
9199 struct tg3 *tp = netdev_priv(dev);
9200 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
9201 struct tg3_hw_stats *hw_stats = tp->hw_stats;
9206 stats->rx_packets = old_stats->rx_packets +
9207 get_stat64(&hw_stats->rx_ucast_packets) +
9208 get_stat64(&hw_stats->rx_mcast_packets) +
9209 get_stat64(&hw_stats->rx_bcast_packets);
9211 stats->tx_packets = old_stats->tx_packets +
9212 get_stat64(&hw_stats->tx_ucast_packets) +
9213 get_stat64(&hw_stats->tx_mcast_packets) +
9214 get_stat64(&hw_stats->tx_bcast_packets);
9216 stats->rx_bytes = old_stats->rx_bytes +
9217 get_stat64(&hw_stats->rx_octets);
9218 stats->tx_bytes = old_stats->tx_bytes +
9219 get_stat64(&hw_stats->tx_octets);
9221 stats->rx_errors = old_stats->rx_errors +
9222 get_stat64(&hw_stats->rx_errors);
9223 stats->tx_errors = old_stats->tx_errors +
9224 get_stat64(&hw_stats->tx_errors) +
9225 get_stat64(&hw_stats->tx_mac_errors) +
9226 get_stat64(&hw_stats->tx_carrier_sense_errors) +
9227 get_stat64(&hw_stats->tx_discards);
9229 stats->multicast = old_stats->multicast +
9230 get_stat64(&hw_stats->rx_mcast_packets);
9231 stats->collisions = old_stats->collisions +
9232 get_stat64(&hw_stats->tx_collisions);
9234 stats->rx_length_errors = old_stats->rx_length_errors +
9235 get_stat64(&hw_stats->rx_frame_too_long_errors) +
9236 get_stat64(&hw_stats->rx_undersize_packets);
9238 stats->rx_over_errors = old_stats->rx_over_errors +
9239 get_stat64(&hw_stats->rxbds_empty);
9240 stats->rx_frame_errors = old_stats->rx_frame_errors +
9241 get_stat64(&hw_stats->rx_align_errors);
9242 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
9243 get_stat64(&hw_stats->tx_discards);
9244 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
9245 get_stat64(&hw_stats->tx_carrier_sense_errors);
9247 stats->rx_crc_errors = old_stats->rx_crc_errors +
9248 calc_crc_errors(tp);
9250 stats->rx_missed_errors = old_stats->rx_missed_errors +
9251 get_stat64(&hw_stats->rx_discards);
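/* calc_crc() below is a bit-serial implementation of the standard reflected
 * CRC-32 (polynomial 0xedb88320); it serves both the multicast hash filter
 * in __tg3_set_rx_mode() and the NVRAM checksum tests.
 */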
9256 static inline u32 calc_crc(unsigned char *buf, int len)
9264 for (j = 0; j < len; j++) {
9267 for (k = 0; k < 8; k++) {
9280 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9282 /* accept or reject all multicast frames */
9283 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9284 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9285 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9286 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9289 static void __tg3_set_rx_mode(struct net_device *dev)
9291 struct tg3 *tp = netdev_priv(dev);
9294 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9295 RX_MODE_KEEP_VLAN_TAG);
9297 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9300 #if TG3_VLAN_TAG_USED
9302 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9303 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9305 /* By definition, VLAN is always disabled in this
9308 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
9309 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9312 if (dev->flags & IFF_PROMISC) {
9313 /* Promiscuous mode. */
9314 rx_mode |= RX_MODE_PROMISC;
9315 } else if (dev->flags & IFF_ALLMULTI) {
9316 /* Accept all multicast. */
9317 tg3_set_multi(tp, 1);
9318 } else if (netdev_mc_empty(dev)) {
9319 /* Reject all multicast. */
9320 tg3_set_multi(tp, 0);
9322 /* Accept one or more multicast(s). */
9323 struct netdev_hw_addr *ha;
9324 u32 mc_filter[4] = { 0, };
9329 netdev_for_each_mc_addr(ha, dev) {
9330 crc = calc_crc(ha->addr, ETH_ALEN);
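/* A 7-bit index taken from the inverted CRC selects one of the 128
 * hash-filter bits: bits 6:5 pick which MAC_HASH_REG_x to set and
 * bits 4:0 the bit within that register.
 */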
9332 regidx = (bit & 0x60) >> 5;
9334 mc_filter[regidx] |= (1 << bit);
9337 tw32(MAC_HASH_REG_0, mc_filter[0]);
9338 tw32(MAC_HASH_REG_1, mc_filter[1]);
9339 tw32(MAC_HASH_REG_2, mc_filter[2]);
9340 tw32(MAC_HASH_REG_3, mc_filter[3]);
9343 if (rx_mode != tp->rx_mode) {
9344 tp->rx_mode = rx_mode;
9345 tw32_f(MAC_RX_MODE, rx_mode);
9350 static void tg3_set_rx_mode(struct net_device *dev)
9352 struct tg3 *tp = netdev_priv(dev);
9354 if (!netif_running(dev))
9357 tg3_full_lock(tp, 0);
9358 __tg3_set_rx_mode(dev);
9359 tg3_full_unlock(tp);
9362 #define TG3_REGDUMP_LEN (32 * 1024)
9364 static int tg3_get_regs_len(struct net_device *dev)
9366 return TG3_REGDUMP_LEN;
9369 static void tg3_get_regs(struct net_device *dev,
9370 struct ethtool_regs *regs, void *_p)
9373 struct tg3 *tp = netdev_priv(dev);
9379 memset(p, 0, TG3_REGDUMP_LEN);
9381 if (tp->link_config.phy_is_low_power)
9384 tg3_full_lock(tp, 0);
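/* The helper macros below copy 32-bit registers into the dump buffer at the
 * same offsets they occupy in register space: GET_REG32_LOOP() walks a
 * contiguous block of 'len' bytes starting at 'base', GET_REG32_1() grabs a
 * single register.
 */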
9386 #define __GET_REG32(reg) (*(p)++ = tr32(reg))
9387 #define GET_REG32_LOOP(base,len) \
9388 do { p = (u32 *)(orig_p + (base)); \
9389 for (i = 0; i < len; i += 4) \
9390 __GET_REG32((base) + i); \
9392 #define GET_REG32_1(reg) \
9393 do { p = (u32 *)(orig_p + (reg)); \
9394 __GET_REG32((reg)); \
9397 GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
9398 GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
9399 GET_REG32_LOOP(MAC_MODE, 0x4f0);
9400 GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
9401 GET_REG32_1(SNDDATAC_MODE);
9402 GET_REG32_LOOP(SNDBDS_MODE, 0x80);
9403 GET_REG32_LOOP(SNDBDI_MODE, 0x48);
9404 GET_REG32_1(SNDBDC_MODE);
9405 GET_REG32_LOOP(RCVLPC_MODE, 0x20);
9406 GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
9407 GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
9408 GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
9409 GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
9410 GET_REG32_1(RCVDCC_MODE);
9411 GET_REG32_LOOP(RCVBDI_MODE, 0x20);
9412 GET_REG32_LOOP(RCVCC_MODE, 0x14);
9413 GET_REG32_LOOP(RCVLSC_MODE, 0x08);
9414 GET_REG32_1(MBFREE_MODE);
9415 GET_REG32_LOOP(HOSTCC_MODE, 0x100);
9416 GET_REG32_LOOP(MEMARB_MODE, 0x10);
9417 GET_REG32_LOOP(BUFMGR_MODE, 0x58);
9418 GET_REG32_LOOP(RDMAC_MODE, 0x08);
9419 GET_REG32_LOOP(WDMAC_MODE, 0x08);
9420 GET_REG32_1(RX_CPU_MODE);
9421 GET_REG32_1(RX_CPU_STATE);
9422 GET_REG32_1(RX_CPU_PGMCTR);
9423 GET_REG32_1(RX_CPU_HWBKPT);
9424 GET_REG32_1(TX_CPU_MODE);
9425 GET_REG32_1(TX_CPU_STATE);
9426 GET_REG32_1(TX_CPU_PGMCTR);
9427 GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
9428 GET_REG32_LOOP(FTQ_RESET, 0x120);
9429 GET_REG32_LOOP(MSGINT_MODE, 0x0c);
9430 GET_REG32_1(DMAC_MODE);
9431 GET_REG32_LOOP(GRC_MODE, 0x4c);
9432 if (tp->tg3_flags & TG3_FLAG_NVRAM)
9433 GET_REG32_LOOP(NVRAM_CMD, 0x24);
9436 #undef GET_REG32_LOOP
9439 tg3_full_unlock(tp);
9442 static int tg3_get_eeprom_len(struct net_device *dev)
9444 struct tg3 *tp = netdev_priv(dev);
9446 return tp->nvram_size;
9449 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9451 struct tg3 *tp = netdev_priv(dev);
9454 u32 i, offset, len, b_offset, b_count;
9457 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
9460 if (tp->link_config.phy_is_low_power)
9463 offset = eeprom->offset;
9467 eeprom->magic = TG3_EEPROM_MAGIC;
9470 /* adjustments to start on required 4 byte boundary */
9471 b_offset = offset & 3;
9472 b_count = 4 - b_offset;
9473 if (b_count > len) {
9474 /* i.e. offset=1 len=2 */
9477 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
9480 memcpy(data, ((char*)&val) + b_offset, b_count);
9483 eeprom->len += b_count;
9486 /* read bytes up to the last 4 byte boundary */
9487 pd = &data[eeprom->len];
9488 for (i = 0; i < (len - (len & 3)); i += 4) {
9489 ret = tg3_nvram_read_be32(tp, offset + i, &val);
9494 memcpy(pd + i, &val, 4);
9499 /* read last bytes not ending on 4 byte boundary */
9500 pd = &data[eeprom->len];
9502 b_offset = offset + len - b_count;
9503 ret = tg3_nvram_read_be32(tp, b_offset, &val);
9506 memcpy(pd, &val, b_count);
9507 eeprom->len += b_count;
9512 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9514 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
9516 struct tg3 *tp = netdev_priv(dev);
9518 u32 offset, len, b_offset, odd_len;
9522 if (tp->link_config.phy_is_low_power)
9525 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
9526 eeprom->magic != TG3_EEPROM_MAGIC)
9529 offset = eeprom->offset;
9532 if ((b_offset = (offset & 3))) {
9533 /* adjustments to start on required 4 byte boundary */
9534 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
9545 /* adjustments to end on required 4 byte boundary */
9547 len = (len + 3) & ~3;
9548 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
9554 if (b_offset || odd_len) {
9555 buf = kmalloc(len, GFP_KERNEL);
9559 memcpy(buf, &start, 4);
9561 memcpy(buf+len-4, &end, 4);
9562 memcpy(buf + b_offset, data, eeprom->len);
9565 ret = tg3_nvram_write_block(tp, offset, len, buf);
9573 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9575 struct tg3 *tp = netdev_priv(dev);
9577 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9578 struct phy_device *phydev;
9579 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9581 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9582 return phy_ethtool_gset(phydev, cmd);
9585 cmd->supported = (SUPPORTED_Autoneg);
9587 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9588 cmd->supported |= (SUPPORTED_1000baseT_Half |
9589 SUPPORTED_1000baseT_Full);
9591 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9592 cmd->supported |= (SUPPORTED_100baseT_Half |
9593 SUPPORTED_100baseT_Full |
9594 SUPPORTED_10baseT_Half |
9595 SUPPORTED_10baseT_Full |
9597 cmd->port = PORT_TP;
9599 cmd->supported |= SUPPORTED_FIBRE;
9600 cmd->port = PORT_FIBRE;
9603 cmd->advertising = tp->link_config.advertising;
9604 if (netif_running(dev)) {
9605 cmd->speed = tp->link_config.active_speed;
9606 cmd->duplex = tp->link_config.active_duplex;
9608 cmd->phy_address = tp->phy_addr;
9609 cmd->transceiver = XCVR_INTERNAL;
9610 cmd->autoneg = tp->link_config.autoneg;
9616 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9618 struct tg3 *tp = netdev_priv(dev);
9620 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9621 struct phy_device *phydev;
9622 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9624 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9625 return phy_ethtool_sset(phydev, cmd);
9628 if (cmd->autoneg != AUTONEG_ENABLE &&
9629 cmd->autoneg != AUTONEG_DISABLE)
9632 if (cmd->autoneg == AUTONEG_DISABLE &&
9633 cmd->duplex != DUPLEX_FULL &&
9634 cmd->duplex != DUPLEX_HALF)
9637 if (cmd->autoneg == AUTONEG_ENABLE) {
9638 u32 mask = ADVERTISED_Autoneg |
9640 ADVERTISED_Asym_Pause;
9642 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9643 mask |= ADVERTISED_1000baseT_Half |
9644 ADVERTISED_1000baseT_Full;
9646 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
9647 mask |= ADVERTISED_100baseT_Half |
9648 ADVERTISED_100baseT_Full |
9649 ADVERTISED_10baseT_Half |
9650 ADVERTISED_10baseT_Full |
9653 mask |= ADVERTISED_FIBRE;
9655 if (cmd->advertising & ~mask)
9658 mask &= (ADVERTISED_1000baseT_Half |
9659 ADVERTISED_1000baseT_Full |
9660 ADVERTISED_100baseT_Half |
9661 ADVERTISED_100baseT_Full |
9662 ADVERTISED_10baseT_Half |
9663 ADVERTISED_10baseT_Full);
9665 cmd->advertising &= mask;
9667 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9668 if (cmd->speed != SPEED_1000)
9671 if (cmd->duplex != DUPLEX_FULL)
9674 if (cmd->speed != SPEED_100 &&
9675 cmd->speed != SPEED_10)
9680 tg3_full_lock(tp, 0);
9682 tp->link_config.autoneg = cmd->autoneg;
9683 if (cmd->autoneg == AUTONEG_ENABLE) {
9684 tp->link_config.advertising = (cmd->advertising |
9685 ADVERTISED_Autoneg);
9686 tp->link_config.speed = SPEED_INVALID;
9687 tp->link_config.duplex = DUPLEX_INVALID;
9689 tp->link_config.advertising = 0;
9690 tp->link_config.speed = cmd->speed;
9691 tp->link_config.duplex = cmd->duplex;
9694 tp->link_config.orig_speed = tp->link_config.speed;
9695 tp->link_config.orig_duplex = tp->link_config.duplex;
9696 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9698 if (netif_running(dev))
9699 tg3_setup_phy(tp, 1);
9701 tg3_full_unlock(tp);
9706 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9708 struct tg3 *tp = netdev_priv(dev);
9710 strcpy(info->driver, DRV_MODULE_NAME);
9711 strcpy(info->version, DRV_MODULE_VERSION);
9712 strcpy(info->fw_version, tp->fw_ver);
9713 strcpy(info->bus_info, pci_name(tp->pdev));
9716 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9718 struct tg3 *tp = netdev_priv(dev);
9720 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9721 device_can_wakeup(&tp->pdev->dev))
9722 wol->supported = WAKE_MAGIC;
9726 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9727 device_can_wakeup(&tp->pdev->dev))
9728 wol->wolopts = WAKE_MAGIC;
9729 memset(&wol->sopass, 0, sizeof(wol->sopass));
9732 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9734 struct tg3 *tp = netdev_priv(dev);
9735 struct device *dp = &tp->pdev->dev;
9737 if (wol->wolopts & ~WAKE_MAGIC)
9739 if ((wol->wolopts & WAKE_MAGIC) &&
9740 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9743 spin_lock_bh(&tp->lock);
9744 if (wol->wolopts & WAKE_MAGIC) {
9745 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9746 device_set_wakeup_enable(dp, true);
9748 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9749 device_set_wakeup_enable(dp, false);
9751 spin_unlock_bh(&tp->lock);
9756 static u32 tg3_get_msglevel(struct net_device *dev)
9758 struct tg3 *tp = netdev_priv(dev);
9759 return tp->msg_enable;
9762 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9764 struct tg3 *tp = netdev_priv(dev);
9765 tp->msg_enable = value;
9768 static int tg3_set_tso(struct net_device *dev, u32 value)
9770 struct tg3 *tp = netdev_priv(dev);
9772 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9777 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9778 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9779 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9781 dev->features |= NETIF_F_TSO6;
9782 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9783 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9784 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9785 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9787 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9788 dev->features |= NETIF_F_TSO_ECN;
9790 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9792 return ethtool_op_set_tso(dev, value);
9795 static int tg3_nway_reset(struct net_device *dev)
9797 struct tg3 *tp = netdev_priv(dev);
9800 if (!netif_running(dev))
9803 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9806 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9807 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9809 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9813 spin_lock_bh(&tp->lock);
9815 tg3_readphy(tp, MII_BMCR, &bmcr);
9816 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9817 ((bmcr & BMCR_ANENABLE) ||
9818 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9819 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9823 spin_unlock_bh(&tp->lock);
9829 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9831 struct tg3 *tp = netdev_priv(dev);
9833 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9834 ering->rx_mini_max_pending = 0;
9835 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9836 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9838 ering->rx_jumbo_max_pending = 0;
9840 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9842 ering->rx_pending = tp->rx_pending;
9843 ering->rx_mini_pending = 0;
9844 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9845 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9847 ering->rx_jumbo_pending = 0;
9849 ering->tx_pending = tp->napi[0].tx_pending;
9852 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9854 struct tg3 *tp = netdev_priv(dev);
9855 int i, irq_sync = 0, err = 0;
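/* Reject ring sizes the hardware cannot honour; the tx ring must also hold
 * more than MAX_SKB_FRAGS descriptors (three times that on TSO_BUG chips)
 * so a maximally fragmented skb can always be queued.
 */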
9857 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9858 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9859 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9860 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9861 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9862 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9865 if (netif_running(dev)) {
9871 tg3_full_lock(tp, irq_sync);
9873 tp->rx_pending = ering->rx_pending;
9875 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9876 tp->rx_pending > 63)
9877 tp->rx_pending = 63;
9878 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9880 for (i = 0; i < TG3_IRQ_MAX_VECS; i++)
9881 tp->napi[i].tx_pending = ering->tx_pending;
9883 if (netif_running(dev)) {
9884 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9885 err = tg3_restart_hw(tp, 1);
9887 tg3_netif_start(tp);
9890 tg3_full_unlock(tp);
9892 if (irq_sync && !err)
9898 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9900 struct tg3 *tp = netdev_priv(dev);
9902 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9904 if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
9905 epause->rx_pause = 1;
9907 epause->rx_pause = 0;
9909 if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
9910 epause->tx_pause = 1;
9912 epause->tx_pause = 0;
9915 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9917 struct tg3 *tp = netdev_priv(dev);
9920 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9922 struct phy_device *phydev;
9924 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9926 if (!(phydev->supported & SUPPORTED_Pause) ||
9927 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
9928 ((epause->rx_pause && !epause->tx_pause) ||
9929 (!epause->rx_pause && epause->tx_pause))))
9932 tp->link_config.flowctrl = 0;
9933 if (epause->rx_pause) {
9934 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9936 if (epause->tx_pause) {
9937 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9938 newadv = ADVERTISED_Pause;
9940 newadv = ADVERTISED_Pause |
9941 ADVERTISED_Asym_Pause;
9942 } else if (epause->tx_pause) {
9943 tp->link_config.flowctrl |= FLOW_CTRL_TX;
9944 newadv = ADVERTISED_Asym_Pause;
9948 if (epause->autoneg)
9949 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9951 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9953 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9954 u32 oldadv = phydev->advertising &
9955 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
9956 if (oldadv != newadv) {
9957 phydev->advertising &=
9958 ~(ADVERTISED_Pause |
9959 ADVERTISED_Asym_Pause);
9960 phydev->advertising |= newadv;
9961 if (phydev->autoneg) {
9963 * Always renegotiate the link to
9964 * inform our link partner of our
9965 * flow control settings, even if the
9966 * flow control is forced. Let
9967 * tg3_adjust_link() do the final
9968 * flow control setup.
9970 return phy_start_aneg(phydev);
9974 if (!epause->autoneg)
9975 tg3_setup_flow_control(tp, 0, 0);
9977 tp->link_config.orig_advertising &=
9978 ~(ADVERTISED_Pause |
9979 ADVERTISED_Asym_Pause);
9980 tp->link_config.orig_advertising |= newadv;
9985 if (netif_running(dev)) {
9990 tg3_full_lock(tp, irq_sync);
9992 if (epause->autoneg)
9993 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9995 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9996 if (epause->rx_pause)
9997 tp->link_config.flowctrl |= FLOW_CTRL_RX;
9999 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10000 if (epause->tx_pause)
10001 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10003 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10005 if (netif_running(dev)) {
10006 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10007 err = tg3_restart_hw(tp, 1);
10009 tg3_netif_start(tp);
10012 tg3_full_unlock(tp);
10018 static u32 tg3_get_rx_csum(struct net_device *dev)
10020 struct tg3 *tp = netdev_priv(dev);
10021 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
10024 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10026 struct tg3 *tp = netdev_priv(dev);
10028 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10034 spin_lock_bh(&tp->lock);
10036 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
10038 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
10039 spin_unlock_bh(&tp->lock);
10044 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10046 struct tg3 *tp = netdev_priv(dev);
10048 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10054 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10055 ethtool_op_set_tx_ipv6_csum(dev, data);
10057 ethtool_op_set_tx_csum(dev, data);
10062 static int tg3_get_sset_count(struct net_device *dev, int sset)
10066 return TG3_NUM_TEST;
10068 return TG3_NUM_STATS;
10070 return -EOPNOTSUPP;
10074 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10076 switch (stringset) {
10078 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10081 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10084 WARN_ON(1); /* we need a WARN() */
10089 static int tg3_phys_id(struct net_device *dev, u32 data)
10091 struct tg3 *tp = netdev_priv(dev);
10094 if (!netif_running(tp->dev))
10098 data = UINT_MAX / 2;
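/* Each pass through the loop below toggles the LEDs and sleeps 500 ms, so
 * 'data' is the requested blink time in seconds; a request of 0 is bumped to
 * UINT_MAX / 2, i.e. blink until the sleep is interrupted.
 */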
10100 for (i = 0; i < (data * 2); i++) {
10102 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10103 LED_CTRL_1000MBPS_ON |
10104 LED_CTRL_100MBPS_ON |
10105 LED_CTRL_10MBPS_ON |
10106 LED_CTRL_TRAFFIC_OVERRIDE |
10107 LED_CTRL_TRAFFIC_BLINK |
10108 LED_CTRL_TRAFFIC_LED);
10111 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10112 LED_CTRL_TRAFFIC_OVERRIDE);
10114 if (msleep_interruptible(500))
10117 tw32(MAC_LED_CTRL, tp->led_ctrl);
10121 static void tg3_get_ethtool_stats(struct net_device *dev,
10122 struct ethtool_stats *estats, u64 *tmp_stats)
10124 struct tg3 *tp = netdev_priv(dev);
10125 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10128 #define NVRAM_TEST_SIZE 0x100
10129 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
10130 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
10131 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
10132 #define NVRAM_SELFBOOT_HW_SIZE 0x20
10133 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
10135 static int tg3_test_nvram(struct tg3 *tp)
10139 int i, j, k, err = 0, size;
10141 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM)
10144 if (tg3_nvram_read(tp, 0, &magic) != 0)
10147 if (magic == TG3_EEPROM_MAGIC)
10148 size = NVRAM_TEST_SIZE;
10149 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
10150 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
10151 TG3_EEPROM_SB_FORMAT_1) {
10152 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
10153 case TG3_EEPROM_SB_REVISION_0:
10154 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
10156 case TG3_EEPROM_SB_REVISION_2:
10157 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
10159 case TG3_EEPROM_SB_REVISION_3:
10160 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
10167 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
10168 size = NVRAM_SELFBOOT_HW_SIZE;
10172 buf = kmalloc(size, GFP_KERNEL);
10177 for (i = 0, j = 0; i < size; i += 4, j++) {
10178 err = tg3_nvram_read_be32(tp, i, &buf[j]);
10185 /* Selfboot format */
10186 magic = be32_to_cpu(buf[0]);
10187 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
10188 TG3_EEPROM_MAGIC_FW) {
10189 u8 *buf8 = (u8 *) buf, csum8 = 0;
10191 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
10192 TG3_EEPROM_SB_REVISION_2) {
10193 /* For rev 2, the csum doesn't include the MBA. */
10194 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
10196 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
10199 for (i = 0; i < size; i++)
10212 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
10213 TG3_EEPROM_MAGIC_HW) {
10214 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
10215 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
10216 u8 *buf8 = (u8 *) buf;
10218 /* Separate the parity bits and the data bytes. */
10219 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
10220 if ((i == 0) || (i == 8)) {
10224 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10225 parity[k++] = buf8[i] & msk;
10227 } else if (i == 16) {
10231 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
10232 parity[k++] = buf8[i] & msk;
10235 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
10236 parity[k++] = buf8[i] & msk;
10239 data[j++] = buf8[i];
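/* Every data byte together with its stored parity bit must have odd parity:
 * a byte with an even number of set bits needs its parity bit set, a byte
 * with an odd count needs it clear.  Anything else fails the test.
 */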
10243 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
10244 u8 hw8 = hweight8(data[i]);
10246 if ((hw8 & 0x1) && parity[i])
10248 else if (!(hw8 & 0x1) && !parity[i])
10255 /* Bootstrap checksum at offset 0x10 */
10256 csum = calc_crc((unsigned char *) buf, 0x10);
10257 if (csum != be32_to_cpu(buf[0x10/4]))
10260 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
10261 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
10262 if (csum != be32_to_cpu(buf[0xfc/4]))
10272 #define TG3_SERDES_TIMEOUT_SEC 2
10273 #define TG3_COPPER_TIMEOUT_SEC 6
10275 static int tg3_test_link(struct tg3 *tp)
10279 if (!netif_running(tp->dev))
10282 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
10283 max = TG3_SERDES_TIMEOUT_SEC;
10285 max = TG3_COPPER_TIMEOUT_SEC;
10287 for (i = 0; i < max; i++) {
10288 if (netif_carrier_ok(tp->dev))
10291 if (msleep_interruptible(1000))
10298 /* Only test the commonly used registers */
10299 static int tg3_test_registers(struct tg3 *tp)
10301 int i, is_5705, is_5750;
10302 u32 offset, read_mask, write_mask, val, save_val, read_val;
10306 #define TG3_FL_5705 0x1
10307 #define TG3_FL_NOT_5705 0x2
10308 #define TG3_FL_NOT_5788 0x4
10309 #define TG3_FL_NOT_5750 0x8
10313 /* MAC Control Registers */
10314 { MAC_MODE, TG3_FL_NOT_5705,
10315 0x00000000, 0x00ef6f8c },
10316 { MAC_MODE, TG3_FL_5705,
10317 0x00000000, 0x01ef6b8c },
10318 { MAC_STATUS, TG3_FL_NOT_5705,
10319 0x03800107, 0x00000000 },
10320 { MAC_STATUS, TG3_FL_5705,
10321 0x03800100, 0x00000000 },
10322 { MAC_ADDR_0_HIGH, 0x0000,
10323 0x00000000, 0x0000ffff },
10324 { MAC_ADDR_0_LOW, 0x0000,
10325 0x00000000, 0xffffffff },
10326 { MAC_RX_MTU_SIZE, 0x0000,
10327 0x00000000, 0x0000ffff },
10328 { MAC_TX_MODE, 0x0000,
10329 0x00000000, 0x00000070 },
10330 { MAC_TX_LENGTHS, 0x0000,
10331 0x00000000, 0x00003fff },
10332 { MAC_RX_MODE, TG3_FL_NOT_5705,
10333 0x00000000, 0x000007fc },
10334 { MAC_RX_MODE, TG3_FL_5705,
10335 0x00000000, 0x000007dc },
10336 { MAC_HASH_REG_0, 0x0000,
10337 0x00000000, 0xffffffff },
10338 { MAC_HASH_REG_1, 0x0000,
10339 0x00000000, 0xffffffff },
10340 { MAC_HASH_REG_2, 0x0000,
10341 0x00000000, 0xffffffff },
10342 { MAC_HASH_REG_3, 0x0000,
10343 0x00000000, 0xffffffff },
10345 /* Receive Data and Receive BD Initiator Control Registers. */
10346 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
10347 0x00000000, 0xffffffff },
10348 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
10349 0x00000000, 0xffffffff },
10350 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
10351 0x00000000, 0x00000003 },
10352 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
10353 0x00000000, 0xffffffff },
10354 { RCVDBDI_STD_BD+0, 0x0000,
10355 0x00000000, 0xffffffff },
10356 { RCVDBDI_STD_BD+4, 0x0000,
10357 0x00000000, 0xffffffff },
10358 { RCVDBDI_STD_BD+8, 0x0000,
10359 0x00000000, 0xffff0002 },
10360 { RCVDBDI_STD_BD+0xc, 0x0000,
10361 0x00000000, 0xffffffff },
10363 /* Receive BD Initiator Control Registers. */
10364 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
10365 0x00000000, 0xffffffff },
10366 { RCVBDI_STD_THRESH, TG3_FL_5705,
10367 0x00000000, 0x000003ff },
10368 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
10369 0x00000000, 0xffffffff },
10371 /* Host Coalescing Control Registers. */
10372 { HOSTCC_MODE, TG3_FL_NOT_5705,
10373 0x00000000, 0x00000004 },
10374 { HOSTCC_MODE, TG3_FL_5705,
10375 0x00000000, 0x000000f6 },
10376 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
10377 0x00000000, 0xffffffff },
10378 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
10379 0x00000000, 0x000003ff },
10380 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
10381 0x00000000, 0xffffffff },
10382 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
10383 0x00000000, 0x000003ff },
10384 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
10385 0x00000000, 0xffffffff },
10386 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10387 0x00000000, 0x000000ff },
10388 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
10389 0x00000000, 0xffffffff },
10390 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
10391 0x00000000, 0x000000ff },
10392 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
10393 0x00000000, 0xffffffff },
10394 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
10395 0x00000000, 0xffffffff },
10396 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10397 0x00000000, 0xffffffff },
10398 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10399 0x00000000, 0x000000ff },
10400 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
10401 0x00000000, 0xffffffff },
10402 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
10403 0x00000000, 0x000000ff },
10404 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
10405 0x00000000, 0xffffffff },
10406 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
10407 0x00000000, 0xffffffff },
10408 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
10409 0x00000000, 0xffffffff },
10410 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
10411 0x00000000, 0xffffffff },
10412 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
10413 0x00000000, 0xffffffff },
10414 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
10415 0xffffffff, 0x00000000 },
10416 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
10417 0xffffffff, 0x00000000 },
10419 /* Buffer Manager Control Registers. */
10420 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
10421 0x00000000, 0x007fff80 },
10422 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
10423 0x00000000, 0x007fffff },
10424 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
10425 0x00000000, 0x0000003f },
10426 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
10427 0x00000000, 0x000001ff },
10428 { BUFMGR_MB_HIGH_WATER, 0x0000,
10429 0x00000000, 0x000001ff },
10430 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
10431 0xffffffff, 0x00000000 },
10432 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
10433 0xffffffff, 0x00000000 },
10435 /* Mailbox Registers */
10436 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
10437 0x00000000, 0x000001ff },
10438 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
10439 0x00000000, 0x000001ff },
10440 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
10441 0x00000000, 0x000007ff },
10442 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
10443 0x00000000, 0x000001ff },
10445 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
10448 is_5705 = is_5750 = 0;
10449 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10451 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
10455 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
10456 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
10459 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
10462 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
10463 (reg_tbl[i].flags & TG3_FL_NOT_5788))
10466 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
10469 offset = (u32) reg_tbl[i].offset;
10470 read_mask = reg_tbl[i].read_mask;
10471 write_mask = reg_tbl[i].write_mask;
10473 /* Save the original register content */
10474 save_val = tr32(offset);
10476 /* Determine the read-only value. */
10477 read_val = save_val & read_mask;
10479 /* Write zero to the register, then make sure the read-only bits
10480 * are not changed and the read/write bits are all zeros.
10484 val = tr32(offset);
10486 /* Test the read-only and read/write bits. */
10487 if (((val & read_mask) != read_val) || (val & write_mask))
10490 /* Write ones to all the bits defined by RdMask and WrMask, then
10491 * make sure the read-only bits are not changed and the
10492 * read/write bits are all ones.
10494 tw32(offset, read_mask | write_mask);
10496 val = tr32(offset);
10498 /* Test the read-only bits. */
10499 if ((val & read_mask) != read_val)
10502 /* Test the read/write bits. */
10503 if ((val & write_mask) != write_mask)
10506 tw32(offset, save_val);
10512 if (netif_msg_hw(tp))
10513 netdev_err(tp->dev,
10514 "Register test failed at offset %x\n", offset);
10515 tw32(offset, save_val);
10519 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10521 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10525 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10526 for (j = 0; j < len; j += 4) {
10529 tg3_write_mem(tp, offset + j, test_pattern[i]);
10530 tg3_read_mem(tp, offset + j, &val);
10531 if (val != test_pattern[i])
10538 static int tg3_test_memory(struct tg3 *tp)
10540 static struct mem_entry {
10543 } mem_tbl_570x[] = {
10544 { 0x00000000, 0x00b50},
10545 { 0x00002000, 0x1c000},
10546 { 0xffffffff, 0x00000}
10547 }, mem_tbl_5705[] = {
10548 { 0x00000100, 0x0000c},
10549 { 0x00000200, 0x00008},
10550 { 0x00004000, 0x00800},
10551 { 0x00006000, 0x01000},
10552 { 0x00008000, 0x02000},
10553 { 0x00010000, 0x0e000},
10554 { 0xffffffff, 0x00000}
10555 }, mem_tbl_5755[] = {
10556 { 0x00000200, 0x00008},
10557 { 0x00004000, 0x00800},
10558 { 0x00006000, 0x00800},
10559 { 0x00008000, 0x02000},
10560 { 0x00010000, 0x0c000},
10561 { 0xffffffff, 0x00000}
10562 }, mem_tbl_5906[] = {
10563 { 0x00000200, 0x00008},
10564 { 0x00004000, 0x00400},
10565 { 0x00006000, 0x00400},
10566 { 0x00008000, 0x01000},
10567 { 0x00010000, 0x01000},
10568 { 0xffffffff, 0x00000}
10569 }, mem_tbl_5717[] = {
10570 { 0x00000200, 0x00008},
10571 { 0x00010000, 0x0a000},
10572 { 0x00020000, 0x13c00},
10573 { 0xffffffff, 0x00000}
10574 }, mem_tbl_57765[] = {
10575 { 0x00000200, 0x00008},
10576 { 0x00004000, 0x00800},
10577 { 0x00006000, 0x09800},
10578 { 0x00010000, 0x0a000},
10579 { 0xffffffff, 0x00000}
10581 struct mem_entry *mem_tbl;
10585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
10586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
10587 mem_tbl = mem_tbl_5717;
10588 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
10589 mem_tbl = mem_tbl_57765;
10590 else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10591 mem_tbl = mem_tbl_5755;
10592 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10593 mem_tbl = mem_tbl_5906;
10594 else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
10595 mem_tbl = mem_tbl_5705;
10597 mem_tbl = mem_tbl_570x;
10599 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10600 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10601 mem_tbl[i].len)) != 0)
10608 #define TG3_MAC_LOOPBACK 0
10609 #define TG3_PHY_LOOPBACK 1
10611 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10613 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10614 u32 desc_idx, coal_now;
10615 struct sk_buff *skb, *rx_skb;
10618 int num_pkts, tx_len, rx_len, i, err;
10619 struct tg3_rx_buffer_desc *desc;
10620 struct tg3_napi *tnapi, *rnapi;
10621 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
10623 tnapi = &tp->napi[0];
10624 rnapi = &tp->napi[0];
10625 if (tp->irq_cnt > 1) {
10626 rnapi = &tp->napi[1];
10627 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
10628 tnapi = &tp->napi[1];
10630 coal_now = tnapi->coal_now | rnapi->coal_now;
10632 if (loopback_mode == TG3_MAC_LOOPBACK) {
10633 /* HW errata - mac loopback fails in some cases on 5780.
10634 * Normal traffic and PHY loopback are not affected by
10637 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10640 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10641 MAC_MODE_PORT_INT_LPBACK;
10642 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10643 mac_mode |= MAC_MODE_LINK_POLARITY;
10644 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10645 mac_mode |= MAC_MODE_PORT_MODE_MII;
10647 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10648 tw32(MAC_MODE, mac_mode);
10649 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10652 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10653 tg3_phy_fet_toggle_apd(tp, false);
10654 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10656 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10658 tg3_phy_toggle_automdix(tp, 0);
10660 tg3_writephy(tp, MII_BMCR, val);
10663 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10664 if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
10665 tg3_writephy(tp, MII_TG3_FET_PTEST,
10666 MII_TG3_FET_PTEST_FRC_TX_LINK |
10667 MII_TG3_FET_PTEST_FRC_TX_LOCK);
10668 /* The write needs to be flushed for the AC131 */
10669 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10670 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
10671 mac_mode |= MAC_MODE_PORT_MODE_MII;
10673 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10675 /* reset to prevent losing 1st rx packet intermittently */
10676 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10677 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10679 tw32_f(MAC_RX_MODE, tp->rx_mode);
10681 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10682 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
10683 if (masked_phy_id == TG3_PHY_ID_BCM5401)
10684 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10685 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
10686 mac_mode |= MAC_MODE_LINK_POLARITY;
10687 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10688 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10690 tw32(MAC_MODE, mac_mode);
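/* With the MAC or PHY loopback path configured above, build a test frame
 * addressed to ourselves, post it on the tx ring, then poll the status block
 * until both the tx completion and the looped-back rx packet show up, and
 * finally verify the payload byte for byte.
 */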
10698 skb = netdev_alloc_skb(tp->dev, tx_len);
10702 tx_data = skb_put(skb, tx_len);
10703 memcpy(tx_data, tp->dev->dev_addr, 6);
10704 memset(tx_data + 6, 0x0, 8);
10706 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10708 for (i = 14; i < tx_len; i++)
10709 tx_data[i] = (u8) (i & 0xff);
10711 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10712 if (pci_dma_mapping_error(tp->pdev, map)) {
10713 dev_kfree_skb(skb);
10717 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10722 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
10726 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1);
10731 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
10732 tr32_mailbox(tnapi->prodmbox);
10736 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10737 for (i = 0; i < 35; i++) {
10738 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10743 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
10744 rx_idx = rnapi->hw_status->idx[0].rx_producer;
10745 if ((tx_idx == tnapi->tx_prod) &&
10746 (rx_idx == (rx_start_idx + num_pkts)))
10750 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10751 dev_kfree_skb(skb);
10753 if (tx_idx != tnapi->tx_prod)
10756 if (rx_idx != rx_start_idx + num_pkts)
10759 desc = &rnapi->rx_rcb[rx_start_idx];
10760 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10761 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10762 if (opaque_key != RXD_OPAQUE_RING_STD)
10765 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10766 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10769 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10770 if (rx_len != tx_len)
10773 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10775 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10776 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10778 for (i = 14; i < tx_len; i++) {
10779 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10784 /* tg3_free_rings will unmap and free the rx_skb */
10789 #define TG3_MAC_LOOPBACK_FAILED 1
10790 #define TG3_PHY_LOOPBACK_FAILED 2
10791 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10792 TG3_PHY_LOOPBACK_FAILED)
10794 static int tg3_test_loopback(struct tg3 *tp)
10799 if (!netif_running(tp->dev))
10800 return TG3_LOOPBACK_FAILED;
10802 err = tg3_reset_hw(tp, 1);
10804 return TG3_LOOPBACK_FAILED;
10806 /* Turn off gphy autopowerdown. */
10807 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10808 tg3_phy_toggle_apd(tp, false);
10810 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10814 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10816 /* Wait for up to 40 microseconds to acquire lock. */
10817 for (i = 0; i < 4; i++) {
10818 status = tr32(TG3_CPMU_MUTEX_GNT);
10819 if (status == CPMU_MUTEX_GNT_DRIVER)
10824 if (status != CPMU_MUTEX_GNT_DRIVER)
10825 return TG3_LOOPBACK_FAILED;
10827 /* Turn off link-based power management. */
10828 cpmuctrl = tr32(TG3_CPMU_CTRL);
10829 tw32(TG3_CPMU_CTRL,
10830 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10831 CPMU_CTRL_LINK_AWARE_MODE));
10834 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10835 err |= TG3_MAC_LOOPBACK_FAILED;
10837 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) {
10838 tw32(TG3_CPMU_CTRL, cpmuctrl);
10840 /* Release the mutex */
10841 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10844 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10845 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10846 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10847 err |= TG3_PHY_LOOPBACK_FAILED;
10850 /* Re-enable gphy autopowerdown. */
10851 if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
10852 tg3_phy_toggle_apd(tp, true);
10857 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10860 struct tg3 *tp = netdev_priv(dev);
10862 if (tp->link_config.phy_is_low_power)
10863 tg3_set_power_state(tp, PCI_D0);
10865 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10867 if (tg3_test_nvram(tp) != 0) {
10868 etest->flags |= ETH_TEST_FL_FAILED;
10871 if (tg3_test_link(tp) != 0) {
10872 etest->flags |= ETH_TEST_FL_FAILED;
10875 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10876 int err, err2 = 0, irq_sync = 0;
10878 if (netif_running(dev)) {
10880 tg3_netif_stop(tp);
10884 tg3_full_lock(tp, irq_sync);
10886 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10887 err = tg3_nvram_lock(tp);
10888 tg3_halt_cpu(tp, RX_CPU_BASE);
10889 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10890 tg3_halt_cpu(tp, TX_CPU_BASE);
10892 tg3_nvram_unlock(tp);
10894 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10897 if (tg3_test_registers(tp) != 0) {
10898 etest->flags |= ETH_TEST_FL_FAILED;
10901 if (tg3_test_memory(tp) != 0) {
10902 etest->flags |= ETH_TEST_FL_FAILED;
10905 if ((data[4] = tg3_test_loopback(tp)) != 0)
10906 etest->flags |= ETH_TEST_FL_FAILED;
10908 tg3_full_unlock(tp);
10910 if (tg3_test_interrupt(tp) != 0) {
10911 etest->flags |= ETH_TEST_FL_FAILED;
10915 tg3_full_lock(tp, 0);
10917 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10918 if (netif_running(dev)) {
10919 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10920 err2 = tg3_restart_hw(tp, 1);
10922 tg3_netif_start(tp);
10925 tg3_full_unlock(tp);
10927 if (irq_sync && !err2)
10930 if (tp->link_config.phy_is_low_power)
10931 tg3_set_power_state(tp, PCI_D3hot);
10935 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10937 struct mii_ioctl_data *data = if_mii(ifr);
10938 struct tg3 *tp = netdev_priv(dev);
10941 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10942 struct phy_device *phydev;
10943 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10945 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10946 return phy_mii_ioctl(phydev, data, cmd);
10951 data->phy_id = tp->phy_addr;
10954 case SIOCGMIIREG: {
10957 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10958 break; /* We have no PHY */
10960 if (tp->link_config.phy_is_low_power)
10963 spin_lock_bh(&tp->lock);
10964 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10965 spin_unlock_bh(&tp->lock);
10967 data->val_out = mii_regval;
10973 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10974 break; /* We have no PHY */
10976 if (tp->link_config.phy_is_low_power)
10979 spin_lock_bh(&tp->lock);
10980 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10981 spin_unlock_bh(&tp->lock);
10989 return -EOPNOTSUPP;
10992 #if TG3_VLAN_TAG_USED
10993 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10995 struct tg3 *tp = netdev_priv(dev);
10997 if (!netif_running(dev)) {
11002 tg3_netif_stop(tp);
11004 tg3_full_lock(tp, 0);
11008 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
11009 __tg3_set_rx_mode(dev);
11011 tg3_netif_start(tp);
11013 tg3_full_unlock(tp);
11017 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11019 struct tg3 *tp = netdev_priv(dev);
11021 memcpy(ec, &tp->coal, sizeof(*ec));
11025 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11027 struct tg3 *tp = netdev_priv(dev);
11028 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11029 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11031 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
11032 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11033 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11034 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11035 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11038 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11039 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11040 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11041 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11042 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11043 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11044 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11045 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11046 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11047 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11050 /* No rx interrupts will be generated if both are zero */
11051 if ((ec->rx_coalesce_usecs == 0) &&
11052 (ec->rx_max_coalesced_frames == 0))
11055 /* No tx interrupts will be generated if both are zero */
11056 if ((ec->tx_coalesce_usecs == 0) &&
11057 (ec->tx_max_coalesced_frames == 0))
11060 /* Only copy relevant parameters, ignore all others. */
11061 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11062 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11063 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11064 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11065 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11066 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11067 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11068 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11069 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11071 if (netif_running(dev)) {
11072 tg3_full_lock(tp, 0);
11073 __tg3_set_coalesce(tp, &tp->coal);
11074 tg3_full_unlock(tp);
11079 static const struct ethtool_ops tg3_ethtool_ops = {
11080 .get_settings = tg3_get_settings,
11081 .set_settings = tg3_set_settings,
11082 .get_drvinfo = tg3_get_drvinfo,
11083 .get_regs_len = tg3_get_regs_len,
11084 .get_regs = tg3_get_regs,
11085 .get_wol = tg3_get_wol,
11086 .set_wol = tg3_set_wol,
11087 .get_msglevel = tg3_get_msglevel,
11088 .set_msglevel = tg3_set_msglevel,
11089 .nway_reset = tg3_nway_reset,
11090 .get_link = ethtool_op_get_link,
11091 .get_eeprom_len = tg3_get_eeprom_len,
11092 .get_eeprom = tg3_get_eeprom,
11093 .set_eeprom = tg3_set_eeprom,
11094 .get_ringparam = tg3_get_ringparam,
11095 .set_ringparam = tg3_set_ringparam,
11096 .get_pauseparam = tg3_get_pauseparam,
11097 .set_pauseparam = tg3_set_pauseparam,
11098 .get_rx_csum = tg3_get_rx_csum,
11099 .set_rx_csum = tg3_set_rx_csum,
11100 .set_tx_csum = tg3_set_tx_csum,
11101 .set_sg = ethtool_op_set_sg,
11102 .set_tso = tg3_set_tso,
11103 .self_test = tg3_self_test,
11104 .get_strings = tg3_get_strings,
11105 .phys_id = tg3_phys_id,
11106 .get_ethtool_stats = tg3_get_ethtool_stats,
11107 .get_coalesce = tg3_get_coalesce,
11108 .set_coalesce = tg3_set_coalesce,
11109 .get_sset_count = tg3_get_sset_count,
11112 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11114 u32 cursize, val, magic;
11116 tp->nvram_size = EEPROM_CHIP_SIZE;
11118 if (tg3_nvram_read(tp, 0, &magic) != 0)
11121 if ((magic != TG3_EEPROM_MAGIC) &&
11122 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11123 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11127 * Size the chip by reading offsets at increasing powers of two.
11128 * When we encounter our validation signature, we know the addressing
11129 * has wrapped around, and thus have our chip size.
11133 while (cursize < tp->nvram_size) {
11134 if (tg3_nvram_read(tp, cursize, &val) != 0)
11143 tp->nvram_size = cursize;
11146 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
11150 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
11151 tg3_nvram_read(tp, 0, &val) != 0)
11154 /* Selfboot format */
11155 if (val != TG3_EEPROM_MAGIC) {
11156 tg3_get_eeprom_size(tp);
11160 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
11162 /* This is confusing. We want to operate on the
11163 * 16-bit value at offset 0xf2. The tg3_nvram_read()
11164 * call will read from NVRAM and byteswap the data
11165 * according to the byteswapping settings for all
11166 * other register accesses. This ensures the data we
11167 * want will always reside in the lower 16-bits.
11168 * However, the data in NVRAM is in LE format, which
11169 * means the data from the NVRAM read will always be
11170 * opposite the endianness of the CPU. The 16-bit
11171 * byteswap then brings the data to CPU endianness.
11173 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
11177 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11180 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
11184 nvcfg1 = tr32(NVRAM_CFG1);
11185 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
11186 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11188 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11189 tw32(NVRAM_CFG1, nvcfg1);
11192 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
11193 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
11194 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
11195 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
11196 tp->nvram_jedecnum = JEDEC_ATMEL;
11197 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11198 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11200 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
11201 tp->nvram_jedecnum = JEDEC_ATMEL;
11202 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
11204 case FLASH_VENDOR_ATMEL_EEPROM:
11205 tp->nvram_jedecnum = JEDEC_ATMEL;
11206 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11207 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11209 case FLASH_VENDOR_ST:
11210 tp->nvram_jedecnum = JEDEC_ST;
11211 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
11212 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11214 case FLASH_VENDOR_SAIFUN:
11215 tp->nvram_jedecnum = JEDEC_SAIFUN;
11216 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
11218 case FLASH_VENDOR_SST_SMALL:
11219 case FLASH_VENDOR_SST_LARGE:
11220 tp->nvram_jedecnum = JEDEC_SST;
11221 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
11225 tp->nvram_jedecnum = JEDEC_ATMEL;
11226 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
11227 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11231 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11233 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11234 case FLASH_5752PAGE_SIZE_256:
11235 tp->nvram_pagesize = 256;
11237 case FLASH_5752PAGE_SIZE_512:
11238 tp->nvram_pagesize = 512;
11240 case FLASH_5752PAGE_SIZE_1K:
11241 tp->nvram_pagesize = 1024;
11243 case FLASH_5752PAGE_SIZE_2K:
11244 tp->nvram_pagesize = 2048;
11246 case FLASH_5752PAGE_SIZE_4K:
11247 tp->nvram_pagesize = 4096;
11249 case FLASH_5752PAGE_SIZE_264:
11250 tp->nvram_pagesize = 264;
11252 case FLASH_5752PAGE_SIZE_528:
11253 tp->nvram_pagesize = 528;
11258 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
11262 nvcfg1 = tr32(NVRAM_CFG1);
11264 /* NVRAM protection for TPM */
11265 if (nvcfg1 & (1 << 27))
11266 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11268 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11269 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
11270 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
11271 tp->nvram_jedecnum = JEDEC_ATMEL;
11272 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11274 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11275 tp->nvram_jedecnum = JEDEC_ATMEL;
11276 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11277 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11279 case FLASH_5752VENDOR_ST_M45PE10:
11280 case FLASH_5752VENDOR_ST_M45PE20:
11281 case FLASH_5752VENDOR_ST_M45PE40:
11282 tp->nvram_jedecnum = JEDEC_ST;
11283 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11284 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11288 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
11289 tg3_nvram_get_pagesize(tp, nvcfg1);
11291 /* For eeprom, set pagesize to maximum eeprom size */
11292 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11294 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11295 tw32(NVRAM_CFG1, nvcfg1);
11299 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
11301 u32 nvcfg1, protect = 0;
11303 nvcfg1 = tr32(NVRAM_CFG1);
11305 /* NVRAM protection for TPM */
11306 if (nvcfg1 & (1 << 27)) {
11307 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11311 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11313 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11314 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11315 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11316 case FLASH_5755VENDOR_ATMEL_FLASH_5:
11317 tp->nvram_jedecnum = JEDEC_ATMEL;
11318 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11319 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11320 tp->nvram_pagesize = 264;
11321 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
11322 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
11323 tp->nvram_size = (protect ? 0x3e200 :
11324 TG3_NVRAM_SIZE_512KB);
11325 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
11326 tp->nvram_size = (protect ? 0x1f200 :
11327 TG3_NVRAM_SIZE_256KB);
11329 tp->nvram_size = (protect ? 0x1f200 :
11330 TG3_NVRAM_SIZE_128KB);
11332 case FLASH_5752VENDOR_ST_M45PE10:
11333 case FLASH_5752VENDOR_ST_M45PE20:
11334 case FLASH_5752VENDOR_ST_M45PE40:
11335 tp->nvram_jedecnum = JEDEC_ST;
11336 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11337 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11338 tp->nvram_pagesize = 256;
11339 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
11340 tp->nvram_size = (protect ?
11341 TG3_NVRAM_SIZE_64KB :
11342 TG3_NVRAM_SIZE_128KB);
11343 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
11344 tp->nvram_size = (protect ?
11345 TG3_NVRAM_SIZE_64KB :
11346 TG3_NVRAM_SIZE_256KB);
11348 tp->nvram_size = (protect ?
11349 TG3_NVRAM_SIZE_128KB :
11350 TG3_NVRAM_SIZE_512KB);
11355 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
11359 nvcfg1 = tr32(NVRAM_CFG1);
11361 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11362 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
11363 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11364 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
11365 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11366 tp->nvram_jedecnum = JEDEC_ATMEL;
11367 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11368 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11370 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11371 tw32(NVRAM_CFG1, nvcfg1);
11373 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11374 case FLASH_5755VENDOR_ATMEL_FLASH_1:
11375 case FLASH_5755VENDOR_ATMEL_FLASH_2:
11376 case FLASH_5755VENDOR_ATMEL_FLASH_3:
11377 tp->nvram_jedecnum = JEDEC_ATMEL;
11378 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11379 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11380 tp->nvram_pagesize = 264;
11382 case FLASH_5752VENDOR_ST_M45PE10:
11383 case FLASH_5752VENDOR_ST_M45PE20:
11384 case FLASH_5752VENDOR_ST_M45PE40:
11385 tp->nvram_jedecnum = JEDEC_ST;
11386 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11387 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11388 tp->nvram_pagesize = 256;
11393 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11395 u32 nvcfg1, protect = 0;
11397 nvcfg1 = tr32(NVRAM_CFG1);
11399 /* NVRAM protection for TPM */
11400 if (nvcfg1 & (1 << 27)) {
11401 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11405 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
11407 case FLASH_5761VENDOR_ATMEL_ADB021D:
11408 case FLASH_5761VENDOR_ATMEL_ADB041D:
11409 case FLASH_5761VENDOR_ATMEL_ADB081D:
11410 case FLASH_5761VENDOR_ATMEL_ADB161D:
11411 case FLASH_5761VENDOR_ATMEL_MDB021D:
11412 case FLASH_5761VENDOR_ATMEL_MDB041D:
11413 case FLASH_5761VENDOR_ATMEL_MDB081D:
11414 case FLASH_5761VENDOR_ATMEL_MDB161D:
11415 tp->nvram_jedecnum = JEDEC_ATMEL;
11416 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11417 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11418 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11419 tp->nvram_pagesize = 256;
11421 case FLASH_5761VENDOR_ST_A_M45PE20:
11422 case FLASH_5761VENDOR_ST_A_M45PE40:
11423 case FLASH_5761VENDOR_ST_A_M45PE80:
11424 case FLASH_5761VENDOR_ST_A_M45PE16:
11425 case FLASH_5761VENDOR_ST_M_M45PE20:
11426 case FLASH_5761VENDOR_ST_M_M45PE40:
11427 case FLASH_5761VENDOR_ST_M_M45PE80:
11428 case FLASH_5761VENDOR_ST_M_M45PE16:
11429 tp->nvram_jedecnum = JEDEC_ST;
11430 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11431 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11432 tp->nvram_pagesize = 256;
11437 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
11440 case FLASH_5761VENDOR_ATMEL_ADB161D:
11441 case FLASH_5761VENDOR_ATMEL_MDB161D:
11442 case FLASH_5761VENDOR_ST_A_M45PE16:
11443 case FLASH_5761VENDOR_ST_M_M45PE16:
11444 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
11446 case FLASH_5761VENDOR_ATMEL_ADB081D:
11447 case FLASH_5761VENDOR_ATMEL_MDB081D:
11448 case FLASH_5761VENDOR_ST_A_M45PE80:
11449 case FLASH_5761VENDOR_ST_M_M45PE80:
11450 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
11452 case FLASH_5761VENDOR_ATMEL_ADB041D:
11453 case FLASH_5761VENDOR_ATMEL_MDB041D:
11454 case FLASH_5761VENDOR_ST_A_M45PE40:
11455 case FLASH_5761VENDOR_ST_M_M45PE40:
11456 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11458 case FLASH_5761VENDOR_ATMEL_ADB021D:
11459 case FLASH_5761VENDOR_ATMEL_MDB021D:
11460 case FLASH_5761VENDOR_ST_A_M45PE20:
11461 case FLASH_5761VENDOR_ST_M_M45PE20:
11462 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11468 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11470 tp->nvram_jedecnum = JEDEC_ATMEL;
11471 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11472 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11475 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
11479 nvcfg1 = tr32(NVRAM_CFG1);
11481 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11482 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
11483 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
11484 tp->nvram_jedecnum = JEDEC_ATMEL;
11485 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11486 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11488 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11489 tw32(NVRAM_CFG1, nvcfg1);
11491 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11492 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11493 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11494 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11495 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11496 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11497 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11498 tp->nvram_jedecnum = JEDEC_ATMEL;
11499 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11500 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11502 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11503 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
11504 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
11505 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
11506 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11508 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
11509 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
11510 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11512 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
11513 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
11514 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11518 case FLASH_5752VENDOR_ST_M45PE10:
11519 case FLASH_5752VENDOR_ST_M45PE20:
11520 case FLASH_5752VENDOR_ST_M45PE40:
11521 tp->nvram_jedecnum = JEDEC_ST;
11522 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11523 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11525 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11526 case FLASH_5752VENDOR_ST_M45PE10:
11527 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11529 case FLASH_5752VENDOR_ST_M45PE20:
11530 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11532 case FLASH_5752VENDOR_ST_M45PE40:
11533 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
11538 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11542 tg3_nvram_get_pagesize(tp, nvcfg1);
11543 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11544 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11548 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
11552 nvcfg1 = tr32(NVRAM_CFG1);
11554 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11555 case FLASH_5717VENDOR_ATMEL_EEPROM:
11556 case FLASH_5717VENDOR_MICRO_EEPROM:
11557 tp->nvram_jedecnum = JEDEC_ATMEL;
11558 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11559 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11561 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
11562 tw32(NVRAM_CFG1, nvcfg1);
11564 case FLASH_5717VENDOR_ATMEL_MDB011D:
11565 case FLASH_5717VENDOR_ATMEL_ADB011B:
11566 case FLASH_5717VENDOR_ATMEL_ADB011D:
11567 case FLASH_5717VENDOR_ATMEL_MDB021D:
11568 case FLASH_5717VENDOR_ATMEL_ADB021B:
11569 case FLASH_5717VENDOR_ATMEL_ADB021D:
11570 case FLASH_5717VENDOR_ATMEL_45USPT:
11571 tp->nvram_jedecnum = JEDEC_ATMEL;
11572 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11573 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11575 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11576 case FLASH_5717VENDOR_ATMEL_MDB021D:
11577 case FLASH_5717VENDOR_ATMEL_ADB021B:
11578 case FLASH_5717VENDOR_ATMEL_ADB021D:
11579 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11582 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11586 case FLASH_5717VENDOR_ST_M_M25PE10:
11587 case FLASH_5717VENDOR_ST_A_M25PE10:
11588 case FLASH_5717VENDOR_ST_M_M45PE10:
11589 case FLASH_5717VENDOR_ST_A_M45PE10:
11590 case FLASH_5717VENDOR_ST_M_M25PE20:
11591 case FLASH_5717VENDOR_ST_A_M25PE20:
11592 case FLASH_5717VENDOR_ST_M_M45PE20:
11593 case FLASH_5717VENDOR_ST_A_M45PE20:
11594 case FLASH_5717VENDOR_ST_25USPT:
11595 case FLASH_5717VENDOR_ST_45USPT:
11596 tp->nvram_jedecnum = JEDEC_ST;
11597 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
11598 tp->tg3_flags2 |= TG3_FLG2_FLASH;
11600 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
11601 case FLASH_5717VENDOR_ST_M_M25PE20:
11602 case FLASH_5717VENDOR_ST_A_M25PE20:
11603 case FLASH_5717VENDOR_ST_M_M45PE20:
11604 case FLASH_5717VENDOR_ST_A_M45PE20:
11605 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
11608 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
11613 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM;
11617 tg3_nvram_get_pagesize(tp, nvcfg1);
11618 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
11619 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
11622 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
11623 static void __devinit tg3_nvram_init(struct tg3 *tp)
11625 tw32_f(GRC_EEPROM_ADDR,
11626 (EEPROM_ADDR_FSM_RESET |
11627 (EEPROM_DEFAULT_CLOCK_PERIOD <<
11628 EEPROM_ADDR_CLKPERD_SHIFT)));
11632 /* Enable seeprom accesses. */
11633 tw32_f(GRC_LOCAL_CTRL,
11634 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
11637 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
11638 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
11639 tp->tg3_flags |= TG3_FLAG_NVRAM;
11641 if (tg3_nvram_lock(tp)) {
11642 netdev_warn(tp->dev,
11643 "Cannot get nvram lock, %s failed\n",
11647 tg3_enable_nvram_access(tp);
11649 tp->nvram_size = 0;
11651 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
11652 tg3_get_5752_nvram_info(tp);
11653 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
11654 tg3_get_5755_nvram_info(tp);
11655 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
11656 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
11657 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11658 tg3_get_5787_nvram_info(tp);
11659 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
11660 tg3_get_5761_nvram_info(tp);
11661 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11662 tg3_get_5906_nvram_info(tp);
11663 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
11664 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
11665 tg3_get_57780_nvram_info(tp);
11666 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
11667 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
11668 tg3_get_5717_nvram_info(tp);
11670 tg3_get_nvram_info(tp);
11672 if (tp->nvram_size == 0)
11673 tg3_get_nvram_size(tp);
11675 tg3_disable_nvram_access(tp);
11676 tg3_nvram_unlock(tp);
11679 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
11681 tg3_get_eeprom_size(tp);
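/* Write a dword-aligned buffer through the serial-EEPROM interface, one
 * 32-bit word at a time, polling EEPROM_ADDR_COMPLETE after each word.
 */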
11685 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11686 u32 offset, u32 len, u8 *buf)
11691 for (i = 0; i < len; i += 4) {
11697 memcpy(&data, buf + i, 4);
11700 * The SEEPROM interface expects the data to always be opposite
11701 * the native endian format. We accomplish this by reversing
11702 * all the operations that would have been performed on the
11703 * data from a call to tg3_nvram_read_be32().
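 * (swab32(be32_to_cpu(data)) yields the same value as reading the four
 * buffer bytes as a little-endian word.)
 */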
11705 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
11707 val = tr32(GRC_EEPROM_ADDR);
11708 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11710 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11712 tw32(GRC_EEPROM_ADDR, val |
11713 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11714 (addr & EEPROM_ADDR_ADDR_MASK) |
11715 EEPROM_ADDR_START |
11716 EEPROM_ADDR_WRITE);
11718 for (j = 0; j < 1000; j++) {
11719 val = tr32(GRC_EEPROM_ADDR);
11721 if (val & EEPROM_ADDR_COMPLETE)
11725 if (!(val & EEPROM_ADDR_COMPLETE)) {
11734 /* offset and length are dword aligned */
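/* Unbuffered flash parts are rewritten a page at a time: read the
 * affected page into a scratch buffer, merge in the new data, issue a
 * write-enable and page erase, then stream the page back with
 * NVRAM_CMD_FIRST/LAST framing.
 */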
11735 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11739 u32 pagesize = tp->nvram_pagesize;
11740 u32 pagemask = pagesize - 1;
11744 tmp = kmalloc(pagesize, GFP_KERNEL);
11750 u32 phy_addr, page_off, size;
11752 phy_addr = offset & ~pagemask;
11754 for (j = 0; j < pagesize; j += 4) {
11755 ret = tg3_nvram_read_be32(tp, phy_addr + j,
11756 (__be32 *) (tmp + j));
11763 page_off = offset & pagemask;
11770 memcpy(tmp + page_off, buf, size);
11772 offset = offset + (pagesize - page_off);
11774 tg3_enable_nvram_access(tp);
11777 * Before we can erase the flash page, we need
11778 * to issue a special "write enable" command.
11780 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11782 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11785 /* Erase the target page */
11786 tw32(NVRAM_ADDR, phy_addr);
11788 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11789 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11791 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11794 /* Issue another write enable to start the write. */
11795 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11797 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11800 for (j = 0; j < pagesize; j += 4) {
11803 data = *((__be32 *) (tmp + j));
11805 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11807 tw32(NVRAM_ADDR, phy_addr + j);
11809 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11813 nvram_cmd |= NVRAM_CMD_FIRST;
11814 else if (j == (pagesize - 4))
11815 nvram_cmd |= NVRAM_CMD_LAST;
11817 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11824 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11825 tg3_nvram_exec_cmd(tp, nvram_cmd);
11832 /* offset and length are dword aligned */
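/* Buffered flash parts and EEPROMs accept word-at-a-time writes;
 * NVRAM_CMD_FIRST/LAST are set at page boundaries, and on some chips ST
 * parts get an explicit write-enable at the start of each page.
 */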
11833 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11838 for (i = 0; i < len; i += 4, offset += 4) {
11839 u32 page_off, phy_addr, nvram_cmd;
11842 memcpy(&data, buf + i, 4);
11843 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11845 page_off = offset % tp->nvram_pagesize;
11847 phy_addr = tg3_nvram_phys_addr(tp, offset);
11849 tw32(NVRAM_ADDR, phy_addr);
11851 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11853 if (page_off == 0 || i == 0)
11854 nvram_cmd |= NVRAM_CMD_FIRST;
11855 if (page_off == (tp->nvram_pagesize - 4))
11856 nvram_cmd |= NVRAM_CMD_LAST;
11858 if (i == (len - 4))
11859 nvram_cmd |= NVRAM_CMD_LAST;
11861 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
11862 !(tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
11863 (tp->nvram_jedecnum == JEDEC_ST) &&
11864 (nvram_cmd & NVRAM_CMD_FIRST)) {
11866 if ((ret = tg3_nvram_exec_cmd(tp,
11867 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11872 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11873 /* We always do complete word writes to eeprom. */
11874 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11877 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11883 /* offset and length are dword aligned */
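/* Top-level NVRAM write: release the GPIO-based write protect if it is
 * in use, route the request to the eeprom, buffered or unbuffered
 * helper, and wrap the access in GRC_MODE_NVRAM_WR_ENABLE.
 */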
11884 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11888 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11889 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11890 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11894 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11895 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11899 ret = tg3_nvram_lock(tp);
11903 tg3_enable_nvram_access(tp);
11904 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11905 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11906 tw32(NVRAM_WRITE1, 0x406);
11908 grc_mode = tr32(GRC_MODE);
11909 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11911 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11912 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11914 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11917 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11921 grc_mode = tr32(GRC_MODE);
11922 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11924 tg3_disable_nvram_access(tp);
11925 tg3_nvram_unlock(tp);
11928 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11929 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
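/* Map the PCI subsystem IDs of known boards to the PHY they carry; used
 * by tg3_phy_probe() when the PHY ID can be read neither from the chip
 * nor from the eeprom.
 */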
11936 struct subsys_tbl_ent {
11937 u16 subsys_vendor, subsys_devid;
11941 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
11942 /* Broadcom boards. */
11943 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11944 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
11945 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11946 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
11947 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11948 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
11949 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11950 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
11951 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11952 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
11953 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11954 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
11955 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11956 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
11957 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11958 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
11959 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11960 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
11961 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11962 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
11963 { TG3PCI_SUBVENDOR_ID_BROADCOM,
11964 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
11967 { TG3PCI_SUBVENDOR_ID_3COM,
11968 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
11969 { TG3PCI_SUBVENDOR_ID_3COM,
11970 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
11971 { TG3PCI_SUBVENDOR_ID_3COM,
11972 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
11973 { TG3PCI_SUBVENDOR_ID_3COM,
11974 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
11975 { TG3PCI_SUBVENDOR_ID_3COM,
11976 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
11979 { TG3PCI_SUBVENDOR_ID_DELL,
11980 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
11981 { TG3PCI_SUBVENDOR_ID_DELL,
11982 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
11983 { TG3PCI_SUBVENDOR_ID_DELL,
11984 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
11985 { TG3PCI_SUBVENDOR_ID_DELL,
11986 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
11988 /* Compaq boards. */
11989 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11990 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
11991 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11992 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
11993 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11994 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
11995 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11996 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
11997 { TG3PCI_SUBVENDOR_ID_COMPAQ,
11998 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
12001 { TG3PCI_SUBVENDOR_ID_IBM,
12002 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
12005 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12009 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12010 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12011 tp->pdev->subsystem_vendor) &&
12012 (subsys_id_to_phy_id[i].subsys_devid ==
12013 tp->pdev->subsystem_device))
12014 return &subsys_id_to_phy_id[i];
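/* Read back the configuration the bootcode left in NIC SRAM (PHY ID,
 * LED mode, WOL/ASF/APE enables and related flags) and mirror it into
 * the tg3_flags words. The chip is forced into D0 and the memory
 * arbiter is enabled first so that SRAM is actually reachable.
 */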
12019 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12024 /* On some early chips the SRAM cannot be accessed in D3hot state,
12025 * so we need to make sure we're in D0.
12027 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
12028 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
12029 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
12032 /* Make sure register accesses (indirect or otherwise)
12033 * will function correctly.
12035 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12036 tp->misc_host_ctrl);
12038 /* The memory arbiter has to be enabled in order for SRAM accesses
12039 * to succeed. Normally on powerup the tg3 chip firmware will make
12040 * sure it is enabled, but other entities such as system netboot
12041 * code might disable it.
12043 val = tr32(MEMARB_MODE);
12044 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
12046 tp->phy_id = TG3_PHY_ID_INVALID;
12047 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12049 /* Assume an onboard, WOL-capable device by default. */
12050 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
12052 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12053 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
12054 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12055 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12057 val = tr32(VCPU_CFGSHDW);
12058 if (val & VCPU_CFGSHDW_ASPM_DBNC)
12059 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12060 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
12061 (val & VCPU_CFGSHDW_WOL_MAGPKT))
12062 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12066 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
12067 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
12068 u32 nic_cfg, led_cfg;
12069 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
12070 int eeprom_phy_serdes = 0;
12072 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
12073 tp->nic_sram_data_cfg = nic_cfg;
12075 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
12076 ver >>= NIC_SRAM_DATA_VER_SHIFT;
12077 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
12078 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
12079 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
12080 (ver > 0) && (ver < 0x100))
12081 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
12083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12084 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
12086 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
12087 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
12088 eeprom_phy_serdes = 1;
12090 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
12091 if (nic_phy_id != 0) {
12092 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
12093 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
12095 eeprom_phy_id = (id1 >> 16) << 10;
12096 eeprom_phy_id |= (id2 & 0xfc00) << 16;
12097 eeprom_phy_id |= (id2 & 0x03ff) << 0;
12101 tp->phy_id = eeprom_phy_id;
12102 if (eeprom_phy_serdes) {
12103 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12104 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12106 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
12109 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12110 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
12111 SHASTA_EXT_LED_MODE_MASK);
12113 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
12117 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
12118 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12121 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
12122 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12125 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
12126 tp->led_ctrl = LED_CTRL_MODE_MAC;
12128 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
12129 * read on some older 5700/5701 bootcode.
12131 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12133 GET_ASIC_REV(tp->pci_chip_rev_id) ==
12135 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12139 case SHASTA_EXT_LED_SHARED:
12140 tp->led_ctrl = LED_CTRL_MODE_SHARED;
12141 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
12142 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
12143 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12144 LED_CTRL_MODE_PHY_2);
12147 case SHASTA_EXT_LED_MAC:
12148 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
12151 case SHASTA_EXT_LED_COMBO:
12152 tp->led_ctrl = LED_CTRL_MODE_COMBO;
12153 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
12154 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
12155 LED_CTRL_MODE_PHY_2);
12160 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12161 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
12162 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
12163 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
12165 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
12166 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
12168 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
12169 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
12170 if ((tp->pdev->subsystem_vendor ==
12171 PCI_VENDOR_ID_ARIMA) &&
12172 (tp->pdev->subsystem_device == 0x205a ||
12173 tp->pdev->subsystem_device == 0x2063))
12174 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12176 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
12177 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
12180 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
12181 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
12182 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
12183 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
12186 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
12187 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12188 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
12190 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
12191 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
12192 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
12194 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
12195 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
12196 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
12198 if (cfg2 & (1 << 17))
12199 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
12201 /* serdes signal pre-emphasis in register 0x590 is set by the
12202 * bootcode if bit 18 is set */
12203 if (cfg2 & (1 << 18))
12204 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
12206 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12207 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
12208 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12209 tp->tg3_flags3 |= TG3_FLG3_PHY_ENABLE_APD;
12211 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12214 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
12215 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
12216 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
12219 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
12220 tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
12221 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
12222 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
12223 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
12224 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
12227 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
12228 device_set_wakeup_enable(&tp->pdev->dev,
12229 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
12232 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12237 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12238 tw32(OTP_CTRL, cmd);
12240 /* Wait for up to 1 ms for command to execute. */
12241 for (i = 0; i < 100; i++) {
12242 val = tr32(OTP_STATUS);
12243 if (val & OTP_STATUS_CMD_DONE)
12248 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12251 /* Read the gphy configuration from the OTP region of the chip. The gphy
12252 * configuration is a 32-bit value that straddles the alignment boundary.
12253 * We do two 32-bit reads and then shift and merge the results.
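 * The low half of the first read becomes the high half of the result,
 * and the high half of the second read becomes the low half.
 */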
12255 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
12257 u32 bhalf_otp, thalf_otp;
12259 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
12261 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
12264 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
12266 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12269 thalf_otp = tr32(OTP_READ_DATA);
12271 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
12273 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
12276 bhalf_otp = tr32(OTP_READ_DATA);
12278 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
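/* Identify the PHY. With phylib in use this defers to tg3_phy_init();
 * otherwise the MII ID registers are read (skipped while ASF/APE
 * firmware owns the PHY), with the eeprom-supplied ID and the
 * subsystem-ID table as fallbacks, and autonegotiation is restarted if
 * the copper PHY is not already advertising the full 10/100/1000 set.
 */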
12281 static int __devinit tg3_phy_probe(struct tg3 *tp)
12283 u32 hw_phy_id_1, hw_phy_id_2;
12284 u32 hw_phy_id, hw_phy_id_masked;
12287 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
12288 return tg3_phy_init(tp);
12290 /* Reading the PHY ID register can conflict with ASF
12291 * firmware access to the PHY hardware.
12294 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12295 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
12296 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
12298 /* Now read the physical PHY_ID from the chip and verify
12299 * that it is sane. If it doesn't look good, we fall back
12300 * either to the hard-coded, table-based PHY ID or, failing
12301 * that, to the value found in the eeprom area.
12303 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
12304 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
12306 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
12307 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
12308 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
12310 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
12313 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
12314 tp->phy_id = hw_phy_id;
12315 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
12316 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12318 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
12320 if (tp->phy_id != TG3_PHY_ID_INVALID) {
12321 /* Do nothing, phy ID already set up in
12322 * tg3_get_eeprom_hw_cfg().
12325 struct subsys_tbl_ent *p;
12327 /* No eeprom signature? Try the hardcoded
12328 * subsys device table.
12330 p = tg3_lookup_by_subsys(tp);
12334 tp->phy_id = p->phy_id;
12336 tp->phy_id == TG3_PHY_ID_BCM8002)
12337 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
12341 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
12342 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
12343 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
12344 u32 bmsr, adv_reg, tg3_ctrl, mask;
12346 tg3_readphy(tp, MII_BMSR, &bmsr);
12347 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
12348 (bmsr & BMSR_LSTATUS))
12349 goto skip_phy_reset;
12351 err = tg3_phy_reset(tp);
12355 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
12356 ADVERTISE_100HALF | ADVERTISE_100FULL |
12357 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
12359 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
12360 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
12361 MII_TG3_CTRL_ADV_1000_FULL);
12362 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12363 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
12364 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
12365 MII_TG3_CTRL_ENABLE_AS_MASTER);
12368 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
12369 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
12370 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
12371 if (!tg3_copper_is_advertising_all(tp, mask)) {
12372 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12374 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12375 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12377 tg3_writephy(tp, MII_BMCR,
12378 BMCR_ANENABLE | BMCR_ANRESTART);
12380 tg3_phy_set_wirespeed(tp);
12382 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
12383 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
12384 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
12388 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
12389 err = tg3_init_5401phy_dsp(tp);
12393 err = tg3_init_5401phy_dsp(tp);
12396 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
12397 tp->link_config.advertising =
12398 (ADVERTISED_1000baseT_Half |
12399 ADVERTISED_1000baseT_Full |
12400 ADVERTISED_Autoneg |
12402 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
12403 tp->link_config.advertising &=
12404 ~(ADVERTISED_1000baseT_Half |
12405 ADVERTISED_1000baseT_Full);
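/* Recover the board part number from the PCI VPD, read either out of a
 * valid NVRAM image or via pci_read_vpd(). Boards whose VPD
 * manufacturer ID reads "1028" also contribute a vendor firmware string
 * to tp->fw_ver. Falls back to a hard-coded name per device ID.
 */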
12410 static void __devinit tg3_read_vpd(struct tg3 *tp)
12412 u8 vpd_data[TG3_NVM_VPD_LEN];
12413 unsigned int block_end, rosize, len;
12417 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
12418 tg3_nvram_read(tp, 0x0, &magic))
12419 goto out_not_found;
12421 if (magic == TG3_EEPROM_MAGIC) {
12422 for (i = 0; i < TG3_NVM_VPD_LEN; i += 4) {
12425 /* The data is in little-endian format in NVRAM.
12426 * Use the big-endian read routines to preserve
12427 * the byte order as it exists in NVRAM.
12429 if (tg3_nvram_read_be32(tp, TG3_NVM_VPD_OFF + i, &tmp))
12430 goto out_not_found;
12432 memcpy(&vpd_data[i], &tmp, sizeof(tmp));
12436 unsigned int pos = 0;
12438 for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
12439 cnt = pci_read_vpd(tp->pdev, pos,
12440 TG3_NVM_VPD_LEN - pos,
12442 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12445 goto out_not_found;
12447 if (pos != TG3_NVM_VPD_LEN)
12448 goto out_not_found;
12451 i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
12452 PCI_VPD_LRDT_RO_DATA);
12454 goto out_not_found;
12456 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
12457 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
12458 i += PCI_VPD_LRDT_TAG_SIZE;
12460 if (block_end > TG3_NVM_VPD_LEN)
12461 goto out_not_found;
12463 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12464 PCI_VPD_RO_KEYWORD_MFR_ID);
12466 len = pci_vpd_info_field_size(&vpd_data[j]);
12468 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12469 if (j + len > block_end || len != 4 ||
12470 memcmp(&vpd_data[j], "1028", 4))
12473 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12474 PCI_VPD_RO_KEYWORD_VENDOR0);
12478 len = pci_vpd_info_field_size(&vpd_data[j]);
12480 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12481 if (j + len > block_end)
12484 memcpy(tp->fw_ver, &vpd_data[j], len);
12485 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12489 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12490 PCI_VPD_RO_KEYWORD_PARTNO);
12492 goto out_not_found;
12494 len = pci_vpd_info_field_size(&vpd_data[i]);
12496 i += PCI_VPD_INFO_FLD_HDR_SIZE;
12497 if (len > TG3_BPN_SIZE ||
12498 (len + i) > TG3_NVM_VPD_LEN)
12499 goto out_not_found;
12501 memcpy(tp->board_part_number, &vpd_data[i], len);
12506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12507 strcpy(tp->board_part_number, "BCM95906");
12508 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12509 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
12510 strcpy(tp->board_part_number, "BCM57780");
12511 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12512 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
12513 strcpy(tp->board_part_number, "BCM57760");
12514 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12515 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
12516 strcpy(tp->board_part_number, "BCM57790");
12517 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
12518 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
12519 strcpy(tp->board_part_number, "BCM57788");
12520 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12521 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
12522 strcpy(tp->board_part_number, "BCM57761");
12523 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12524 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
12525 strcpy(tp->board_part_number, "BCM57765");
12526 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12527 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
12528 strcpy(tp->board_part_number, "BCM57781");
12529 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12530 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
12531 strcpy(tp->board_part_number, "BCM57785");
12532 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12533 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
12534 strcpy(tp->board_part_number, "BCM57791");
12535 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12536 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12537 strcpy(tp->board_part_number, "BCM57795");
12539 strcpy(tp->board_part_number, "none");
12542 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12546 if (tg3_nvram_read(tp, offset, &val) ||
12547 (val & 0xfc000000) != 0x0c000000 ||
12548 tg3_nvram_read(tp, offset + 4, &val) ||
12555 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12557 u32 val, offset, start, ver_offset;
12559 bool newver = false;
12561 if (tg3_nvram_read(tp, 0xc, &offset) ||
12562 tg3_nvram_read(tp, 0x4, &start))
12565 offset = tg3_nvram_logical_addr(tp, offset);
12567 if (tg3_nvram_read(tp, offset, &val))
12570 if ((val & 0xfc000000) == 0x0c000000) {
12571 if (tg3_nvram_read(tp, offset + 4, &val))
12578 dst_off = strlen(tp->fw_ver);
12581 if (TG3_VER_SIZE - dst_off < 16 ||
12582 tg3_nvram_read(tp, offset + 8, &ver_offset))
12585 offset = offset + ver_offset - start;
12586 for (i = 0; i < 16; i += 4) {
12588 if (tg3_nvram_read_be32(tp, offset + i, &v))
12591 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12596 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
12599 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12600 TG3_NVM_BCVER_MAJSFT;
12601 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12602 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12603 "v%d.%02d", major, minor);
12607 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
12609 u32 val, major, minor;
12611 /* Use native endian representation */
12612 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
12615 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
12616 TG3_NVM_HWSB_CFG1_MAJSFT;
12617 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
12618 TG3_NVM_HWSB_CFG1_MINSFT;
12620 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
12623 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12625 u32 offset, major, minor, build;
12627 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12629 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12632 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
12633 case TG3_EEPROM_SB_REVISION_0:
12634 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
12636 case TG3_EEPROM_SB_REVISION_2:
12637 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
12639 case TG3_EEPROM_SB_REVISION_3:
12640 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
12642 case TG3_EEPROM_SB_REVISION_4:
12643 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
12645 case TG3_EEPROM_SB_REVISION_5:
12646 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
12652 if (tg3_nvram_read(tp, offset, &val))
12655 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
12656 TG3_EEPROM_SB_EDH_BLD_SHFT;
12657 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
12658 TG3_EEPROM_SB_EDH_MAJ_SHFT;
12659 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
12661 if (minor > 99 || build > 26)
12664 offset = strlen(tp->fw_ver);
12665 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12666 " v%d.%02d", major, minor);
12669 offset = strlen(tp->fw_ver);
12670 if (offset < TG3_VER_SIZE - 1)
12671 tp->fw_ver[offset] = 'a' + build - 1;
12675 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
12677 u32 val, offset, start;
12680 for (offset = TG3_NVM_DIR_START;
12681 offset < TG3_NVM_DIR_END;
12682 offset += TG3_NVM_DIRENT_SIZE) {
12683 if (tg3_nvram_read(tp, offset, &val))
12686 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
12690 if (offset == TG3_NVM_DIR_END)
12693 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
12694 start = 0x08000000;
12695 else if (tg3_nvram_read(tp, offset - 4, &start))
12698 if (tg3_nvram_read(tp, offset + 4, &offset) ||
12699 !tg3_fw_img_is_valid(tp, offset) ||
12700 tg3_nvram_read(tp, offset + 8, &val))
12703 offset += val - start;
12705 vlen = strlen(tp->fw_ver);
12707 tp->fw_ver[vlen++] = ',';
12708 tp->fw_ver[vlen++] = ' ';
12710 for (i = 0; i < 4; i++) {
12712 if (tg3_nvram_read_be32(tp, offset, &v))
12715 offset += sizeof(v);
12717 if (vlen > TG3_VER_SIZE - sizeof(v)) {
12718 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
12722 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
12727 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12732 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) ||
12733 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
12736 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
12737 if (apedata != APE_SEG_SIG_MAGIC)
12740 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
12741 if (!(apedata & APE_FW_STATUS_READY))
12744 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
12746 vlen = strlen(tp->fw_ver);
12748 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " DASH v%d.%d.%d.%d",
12749 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
12750 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
12751 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
12752 (apedata & APE_FW_VERSION_BLDMSK));
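/* Assemble tp->fw_ver: dispatch on the NVRAM magic to the bootcode,
 * selfboot or hardware-selfboot reader, then append the management
 * firmware version where ASF is in use.
 */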
12755 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12758 bool vpd_vers = false;
12760 if (tp->fw_ver[0] != 0)
12763 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12764 strcat(tp->fw_ver, "sb");
12768 if (tg3_nvram_read(tp, 0, &val))
12771 if (val == TG3_EEPROM_MAGIC)
12772 tg3_read_bc_ver(tp);
12773 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
12774 tg3_read_sb_ver(tp, val);
12775 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12776 tg3_read_hwsb_ver(tp);
12780 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12781 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12784 tg3_read_mgmtfw_ver(tp);
12787 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12790 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
12792 static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
12794 #if TG3_VLAN_TAG_USED
12795 dev->vlan_features |= flags;
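/* Probe-time discovery of everything that does not change at runtime:
 * chip/ASIC revision, bus type (PCI, PCI-X, PCI Express), erratum
 * workarounds, register access methods and capability flags, derived
 * from PCI config space, chip straps and the eeprom hardware config.
 */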
12799 static int __devinit tg3_get_invariants(struct tg3 *tp)
12801 static struct pci_device_id write_reorder_chipsets[] = {
12802 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12803 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12804 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12805 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12806 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12807 PCI_DEVICE_ID_VIA_8385_0) },
12811 u32 pci_state_reg, grc_misc_cfg;
12816 /* Force memory write invalidate off. If we leave it on,
12817 * then on 5700_BX chips we have to enable a workaround.
12818 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
12819 * to match the cacheline size. The Broadcom driver has this
12820 * workaround but turns MWI off at all times, so it never uses
12821 * it. This seems to suggest that the workaround is insufficient.
12823 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12824 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
12825 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12827 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
12828 * has the register indirect write enable bit set before
12829 * we try to access any of the MMIO registers. It is also
12830 * critical that the PCI-X hw workaround situation is decided
12831 * before that as well.
12833 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12836 tp->pci_chip_rev_id = (misc_ctrl_reg >>
12837 MISC_HOST_CTRL_CHIPREV_SHIFT);
12838 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12839 u32 prod_id_asic_rev;
12841 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12842 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12843 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724 ||
12844 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719)
12845 pci_read_config_dword(tp->pdev,
12846 TG3PCI_GEN2_PRODID_ASICREV,
12847 &prod_id_asic_rev);
12848 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
12849 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
12850 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
12851 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
12852 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
12853 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
12854 pci_read_config_dword(tp->pdev,
12855 TG3PCI_GEN15_PRODID_ASICREV,
12856 &prod_id_asic_rev);
12858 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
12859 &prod_id_asic_rev);
12861 tp->pci_chip_rev_id = prod_id_asic_rev;
12864 /* Wrong chip ID in 5752 A0. This code can be removed later
12865 * as A0 is not in production.
12867 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12868 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12870 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12871 * we need to disable memory and use config. cycles
12872 * only to access all registers. The 5702/03 chips
12873 * can mistakenly decode the special cycles from the
12874 * ICH chipsets as memory write cycles, causing corruption
12875 * of register and memory space. Only certain ICH bridges
12876 * will drive special cycles with non-zero data during the
12877 * address phase which can fall within the 5703's address
12878 * range. This is not an ICH bug as the PCI spec allows
12879 * non-zero address during special cycles. However, only
12880 * these ICH bridges are known to drive non-zero addresses
12881 * during special cycles.
12883 * Since special cycles do not cross PCI bridges, we only
12884 * enable this workaround if the 5703 is on the secondary
12885 * bus of these ICH bridges.
12887 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12888 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12889 static struct tg3_dev_id {
12893 } ich_chipsets[] = {
12894 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12896 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12898 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12900 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12904 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12905 struct pci_dev *bridge = NULL;
12907 while (pci_id->vendor != 0) {
12908 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12914 if (pci_id->rev != PCI_ANY_ID) {
12915 if (bridge->revision > pci_id->rev)
12918 if (bridge->subordinate &&
12919 (bridge->subordinate->number ==
12920 tp->pdev->bus->number)) {
12922 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12923 pci_dev_put(bridge);
12929 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12930 static struct tg3_dev_id {
12933 } bridge_chipsets[] = {
12934 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12935 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12938 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12939 struct pci_dev *bridge = NULL;
12941 while (pci_id->vendor != 0) {
12942 bridge = pci_get_device(pci_id->vendor,
12949 if (bridge->subordinate &&
12950 (bridge->subordinate->number <=
12951 tp->pdev->bus->number) &&
12952 (bridge->subordinate->subordinate >=
12953 tp->pdev->bus->number)) {
12954 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12955 pci_dev_put(bridge);
12961 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12962 * DMA addresses > 40-bit. This bridge may have additional
12963 * 57xx devices behind it in some 4-port NIC designs, for example.
12964 * Any tg3 device found behind the bridge will also need the 40-bit
12967 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12969 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12970 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12971 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12973 struct pci_dev *bridge = NULL;
12976 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12977 PCI_DEVICE_ID_SERVERWORKS_EPB,
12979 if (bridge && bridge->subordinate &&
12980 (bridge->subordinate->number <=
12981 tp->pdev->bus->number) &&
12982 (bridge->subordinate->subordinate >=
12983 tp->pdev->bus->number)) {
12984 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12985 pci_dev_put(bridge);
12991 /* Initialize misc host control in PCI block. */
12992 tp->misc_host_ctrl |= (misc_ctrl_reg &
12993 MISC_HOST_CTRL_CHIPREV);
12994 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12995 tp->misc_host_ctrl);
12997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
12998 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
12999 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13000 tp->pdev_peer = tg3_find_peer(tp);
13002 /* Intentionally exclude ASIC_REV_5906 */
13003 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13004 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13005 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13006 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13012 tp->tg3_flags3 |= TG3_FLG3_5755_PLUS;
13014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13015 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13017 (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13018 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13019 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
13021 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
13022 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
13023 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
13025 /* 5700 B0 chips do not support checksumming correctly due
13026 * to hardware bugs.
13028 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
13029 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
13031 unsigned long features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
13033 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13034 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13035 features |= NETIF_F_IPV6_CSUM;
13036 tp->dev->features |= features;
13037 vlan_features_add(tp->dev, features);
13040 /* Determine TSO capabilities */
13041 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13042 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13043 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13044 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
13045 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13046 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13047 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
13048 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13049 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
13050 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13051 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13052 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
13053 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13054 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13055 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13056 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
13057 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13058 tp->fw_needed = FIRMWARE_TG3TSO5;
13060 tp->fw_needed = FIRMWARE_TG3TSO;
13065 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
13066 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
13067 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13068 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13069 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13070 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13071 tp->pdev_peer == tp->pdev))
13072 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
13074 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
13075 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13076 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
13079 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13080 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13081 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13082 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
13083 tp->irq_max = TG3_IRQ_MAX_VECS;
13087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13089 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13090 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
13091 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
13092 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
13093 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
13096 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13097 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13098 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13099 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13101 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13102 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13103 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13104 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13106 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13109 tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13110 if (tp->pcie_cap != 0) {
13113 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13115 pcie_set_readrq(tp->pdev, 4096);
13117 pci_read_config_word(tp->pdev,
13118 tp->pcie_cap + PCI_EXP_LNKCTL,
13120 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13121 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13122 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
13123 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13124 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13125 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13126 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13127 tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
13128 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13129 tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
13131 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13132 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13133 } else if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13134 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13135 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13136 if (!tp->pcix_cap) {
13137 dev_err(&tp->pdev->dev,
13138 "Cannot find PCI-X capability, aborting\n");
13142 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13143 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
13146 /* If we have an AMD 762 or VIA K8T800 chipset, write
13147 * reordering to the mailbox registers done by the host
13148 * controller can cause major troubles. We read back from
13149 * every mailbox register write to force the writes to be
13150 * posted to the chip in order.
13152 if (pci_dev_present(write_reorder_chipsets) &&
13153 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13154 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
13156 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13157 &tp->pci_cacheline_sz);
13158 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13159 &tp->pci_lat_timer);
13160 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13161 tp->pci_lat_timer < 64) {
13162 tp->pci_lat_timer = 64;
13163 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13164 tp->pci_lat_timer);
13167 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13168 /* 5700 BX chips need to have their TX producer index
13169 * mailboxes written twice to work around a bug.
13171 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
13173 /* If we are in PCI-X mode, enable register write workaround.
13175 * The workaround is to use indirect register accesses
13176 * for all chip writes except those to mailbox registers.
13178 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
13181 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13183 /* The chip can have its power management PCI config
13184 * space registers clobbered due to this bug.
13185 * So explicitly force the chip into D0 here.
13187 pci_read_config_dword(tp->pdev,
13188 tp->pm_cap + PCI_PM_CTRL,
13190 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13191 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13192 pci_write_config_dword(tp->pdev,
13193 tp->pm_cap + PCI_PM_CTRL,
13196 /* Also, force SERR#/PERR# in PCI command. */
13197 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13198 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13199 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13203 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13204 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
13205 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13206 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
13208 /* Chip-specific fixup from Broadcom driver */
13209 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13210 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13211 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13212 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13215 /* Default fast path register access methods */
13216 tp->read32 = tg3_read32;
13217 tp->write32 = tg3_write32;
13218 tp->read32_mbox = tg3_read32;
13219 tp->write32_mbox = tg3_write32;
13220 tp->write32_tx_mbox = tg3_write32;
13221 tp->write32_rx_mbox = tg3_write32;
13223 /* Various workaround register access methods */
13224 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
13225 tp->write32 = tg3_write_indirect_reg32;
13226 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13227 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
13228 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13230 * Back to back register writes can cause problems on these
13231 * chips; the workaround is to read back all reg writes
13232 * except those to mailbox regs.
13234 * See tg3_write_indirect_reg32().
13236 tp->write32 = tg3_write_flush_reg32;
13239 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
13240 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
13241 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13242 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
13243 tp->write32_rx_mbox = tg3_write_flush_reg32;
13246 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
13247 tp->read32 = tg3_read_indirect_reg32;
13248 tp->write32 = tg3_write_indirect_reg32;
13249 tp->read32_mbox = tg3_read_indirect_mbox;
13250 tp->write32_mbox = tg3_write_indirect_mbox;
13251 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13252 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13257 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13258 pci_cmd &= ~PCI_COMMAND_MEMORY;
13259 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13261 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13262 tp->read32_mbox = tg3_read32_mbox_5906;
13263 tp->write32_mbox = tg3_write32_mbox_5906;
13264 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13265 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13268 if (tp->write32 == tg3_write_indirect_reg32 ||
13269 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13270 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13271 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13272 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
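/* When register writes are indirect, or on a 5700/5701 in PCI-X
 * mode, on-chip SRAM is presumably only reachable through the PCI
 * config-space memory window (see tg3_read_mem()), hence
 * TG3_FLAG_SRAM_USE_CONFIG.
 */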
13274 /* Get eeprom hw config before calling tg3_set_power_state().
13275 * In particular, the TG3_FLG2_IS_NIC flag must be
13276 * determined before calling tg3_set_power_state() so that
13277 * we know whether or not to switch out of Vaux power.
13278 * When the flag is set, it means that GPIO1 is used for eeprom
13279 * write protect and also implies that it is a LOM where GPIOs
13280 * are not used to switch power.
13282 tg3_get_eeprom_hw_cfg(tp);
13284 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13285 /* Allow reads and writes to the
13286 * APE register and memory space.
13288 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13289 PCISTATE_ALLOW_APE_SHMEM_WR |
13290 PCISTATE_ALLOW_APE_PSPACE_WR;
13291 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13295 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13296 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13298 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13299 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13300 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13301 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13302 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
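/* CPMU is assumed to stand for the Central Power Management Unit
 * found on these later ASICs; its presence is what selects the
 * constant 500 kHz MDIO clock (MAC_MI_MODE_500KHZ_CONST) below.
 */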
13304 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
13305 * GPIO1 driven high will bring 5700's external PHY out of reset.
13306 * It is also used as eeprom write protect on LOMs.
13308 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13309 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13310 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
13311 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13312 GRC_LCLCTRL_GPIO_OUTPUT1);
13313 /* Unused GPIO3 must be driven as output on 5752 because there
13314 * are no pull-up resistors on unused GPIO pins.
13316 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13317 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13319 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13320 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13321 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13322 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13324 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13325 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13326 /* Turn off the debug UART. */
13327 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13328 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
13329 /* Keep VMain power. */
13330 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13331 GRC_LCLCTRL_GPIO_OUTPUT0;
13334 /* Force the chip into D0. */
13335 err = tg3_set_power_state(tp, PCI_D0);
13337 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13341 /* Derive initial jumbo mode from MTU assigned in
13342 * ether_setup() via the alloc_etherdev() call
13344 if (tp->dev->mtu > ETH_DATA_LEN &&
13345 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
13346 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
13348 /* Determine WakeOnLan speed to use. */
13349 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13350 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13351 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13352 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13353 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
13355 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
13358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13359 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
13361 /* A few boards don't want the Ethernet@WireSpeed phy feature */
13362 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
13363 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
13364 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13365 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13366 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) ||
13367 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
13368 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
13370 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13371 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13372 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
13373 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13374 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
13376 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
13377 !(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) &&
13378 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13379 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13380 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
13381 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719 &&
13382 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
13383 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13384 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13386 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13387 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13388 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13389 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
13390 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13391 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
13393 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
13396 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13397 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13398 tp->phy_otp = tg3_read_otp_phycfg(tp);
13399 if (tp->phy_otp == 0)
13400 tp->phy_otp = TG3_OTP_DEFAULT;
13403 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
13404 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
13406 tp->mi_mode = MAC_MI_MODE_BASE;
13408 tp->coalesce_mode = 0;
13409 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
13410 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
13411 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
13413 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13414 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
13415 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
13417 err = tg3_mdio_init(tp);
13421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
13422 (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 ||
13423 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
13426 /* Initialize data/descriptor byte/word swapping. */
13427 val = tr32(GRC_MODE);
13428 val &= GRC_MODE_HOST_STACKUP;
13429 tw32(GRC_MODE, val | tp->grc_mode);
13431 tg3_switch_clocks(tp);
13433 /* Clear this out for sanity. */
13434 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
13436 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13438 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
13439 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
13440 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
13442 if (chiprevid == CHIPREV_ID_5701_A0 ||
13443 chiprevid == CHIPREV_ID_5701_B0 ||
13444 chiprevid == CHIPREV_ID_5701_B2 ||
13445 chiprevid == CHIPREV_ID_5701_B5) {
13446 void __iomem *sram_base;
13448 /* Write some dummy words into the SRAM status block
13449 * area, see if it reads back correctly. If the return
13450 * value is bad, force enable the PCIX workaround.
13452 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
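/* Write 0 to the first status block word, then 0 followed by
 * 0xffffffff to the next word; if the first word no longer reads
 * back as 0, window writes are evidently landing on the wrong
 * address, so force the PCI-X target workaround on.
 */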
13454 writel(0x00000000, sram_base);
13455 writel(0x00000000, sram_base + 4);
13456 writel(0xffffffff, sram_base + 4);
13457 if (readl(sram_base) != 0x00000000)
13458 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
13463 tg3_nvram_init(tp);
13465 grc_misc_cfg = tr32(GRC_MISC_CFG);
13466 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
13468 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13469 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
13470 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
13471 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
13473 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
13474 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
13475 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
13476 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
13477 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
13478 HOSTCC_MODE_CLRTICK_TXBD);
13480 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
13481 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13482 tp->misc_host_ctrl);
13485 /* Preserve the APE MAC_MODE bits */
13486 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13487 tp->mac_mode = tr32(MAC_MODE) |
13488 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13490 tp->mac_mode = TG3_DEF_MAC_MODE;
13492 /* these are limited to 10/100 only */
13493 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13494 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
13495 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13496 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13497 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
13498 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
13499 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
13500 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
13501 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
13502 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
13503 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
13504 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
13505 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13506 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13507 (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
13508 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
13510 err = tg3_phy_probe(tp);
13512 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13513 /* ... but do not return immediately ... */
13518 tg3_read_fw_ver(tp);
13520 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
13521 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13524 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
13526 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
13529 /* 5700 {AX,BX} chips have a broken status block link
13530 * change bit implementation, so we must use the
13531 * status register in those cases.
13533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
13534 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
13536 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
13538 /* The led_ctrl is set during tg3_phy_probe; here we might
13539 * have to force the link status polling mechanism based
13540 * upon subsystem IDs.
13542 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
13543 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13544 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
13545 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
13546 TG3_FLAG_USE_LINKCHG_REG);
13549 /* For all SERDES we poll the MAC status register. */
13550 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
13551 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
13553 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13555 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13556 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13558 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13559 tp->rx_offset -= NET_IP_ALIGN;
13560 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13561 tp->rx_copy_thresh = ~(u16)0;
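/* Presumably the 5701 in PCI-X mode cannot DMA into a buffer at a
 * 2-byte offset, so NET_IP_ALIGN is dropped from rx_offset above;
 * on architectures without efficient unaligned access the copy
 * threshold is then maxed out so every packet goes through the
 * copy path and ends up aligned again.
 */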
13565 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13567 /* Increment the rx prod index on the rx std ring by at most
13568 * 8 for these chips to work around hw errata.
13570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13571 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13572 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13573 tp->rx_std_max_post = 8;
13575 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
13576 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
13577 PCIE_PWR_MGMT_L1_THRESH_MSK;
13582 #ifdef CONFIG_SPARC
13583 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
13585 struct net_device *dev = tp->dev;
13586 struct pci_dev *pdev = tp->pdev;
13587 struct device_node *dp = pci_device_to_OF_node(pdev);
13588 const unsigned char *addr;
13591 addr = of_get_property(dp, "local-mac-address", &len);
13592 if (addr && len == 6) {
13593 memcpy(dev->dev_addr, addr, 6);
13594 memcpy(dev->perm_addr, dev->dev_addr, 6);
13600 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
13602 struct net_device *dev = tp->dev;
13604 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
13605 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
13610 static int __devinit tg3_get_device_address(struct tg3 *tp)
13612 struct net_device *dev = tp->dev;
13613 u32 hi, lo, mac_offset;
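/* MAC address lookup order, as implemented below: OpenFirmware
 * property (SPARC only), then the bootcode's SRAM mailbox, then
 * NVRAM at mac_offset, then the MAC_ADDR_0 registers, and finally
 * the SPARC idprom as a last resort.
 */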
13616 #ifdef CONFIG_SPARC
13617 if (!tg3_get_macaddr_sparc(tp))
13622 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
13623 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13624 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
13626 if (tg3_nvram_lock(tp))
13627 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
13629 tg3_nvram_unlock(tp);
13630 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13631 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13632 if (PCI_FUNC(tp->pdev->devfn) & 1)
13634 if (PCI_FUNC(tp->pdev->devfn) > 1)
13635 mac_offset += 0x18c;
13636 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13639 /* First try to get it from MAC address mailbox. */
13640 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
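/* 0x484b in the upper half is assumed to be the bootcode's ASCII
 * "HK" signature marking a valid MAC address in the mailbox.
 */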
13641 if ((hi >> 16) == 0x484b) {
13642 dev->dev_addr[0] = (hi >> 8) & 0xff;
13643 dev->dev_addr[1] = (hi >> 0) & 0xff;
13645 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
13646 dev->dev_addr[2] = (lo >> 24) & 0xff;
13647 dev->dev_addr[3] = (lo >> 16) & 0xff;
13648 dev->dev_addr[4] = (lo >> 8) & 0xff;
13649 dev->dev_addr[5] = (lo >> 0) & 0xff;
13651 /* Some old bootcode may report a 0 MAC address in SRAM */
13652 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
13655 /* Next, try NVRAM. */
13656 if (!(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) &&
13657 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
13658 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
13659 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
13660 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
13662 /* Finally just fetch it out of the MAC control regs. */
13664 hi = tr32(MAC_ADDR_0_HIGH);
13665 lo = tr32(MAC_ADDR_0_LOW);
13667 dev->dev_addr[5] = lo & 0xff;
13668 dev->dev_addr[4] = (lo >> 8) & 0xff;
13669 dev->dev_addr[3] = (lo >> 16) & 0xff;
13670 dev->dev_addr[2] = (lo >> 24) & 0xff;
13671 dev->dev_addr[1] = hi & 0xff;
13672 dev->dev_addr[0] = (hi >> 8) & 0xff;
13676 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
13677 #ifdef CONFIG_SPARC
13678 if (!tg3_get_default_macaddr_sparc(tp))
13683 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
13687 #define BOUNDARY_SINGLE_CACHELINE 1
13688 #define BOUNDARY_MULTI_CACHELINE 2
13690 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13692 int cacheline_size;
13696 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
13698 cacheline_size = 1024;
13700 cacheline_size = (int) byte * 4;
13702 /* On 5703 and later chips, the boundary bits have no effect. */
13705 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13706 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13707 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
13710 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
13711 goal = BOUNDARY_MULTI_CACHELINE;
13713 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
13714 goal = BOUNDARY_SINGLE_CACHELINE;
13720 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13721 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13722 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13723 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
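/* These chips expose only a single on/off knob: any boundary goal
 * keeps cache alignment enabled, no goal disables it outright.
 */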
13730 /* PCI controllers on most RISC systems tend to disconnect
13731 * when a device tries to burst across a cache-line boundary.
13732 * Therefore, letting tg3 do so just wastes PCI bandwidth.
13734 * Unfortunately, for PCI-E there are only limited
13735 * write-side controls for this, and thus for reads
13736 * we will still get the disconnects. We'll also waste
13737 * these PCI cycles for both read and write for chips
13738 * other than 5700 and 5701 which do not implement the boundary bits.
13741 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
13742 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
13743 switch (cacheline_size) {
13748 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13749 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
13750 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
13752 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13753 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13758 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
13759 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
13763 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
13764 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
13767 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13768 switch (cacheline_size) {
13772 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13773 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13774 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
13780 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
13781 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
13785 switch (cacheline_size) {
13787 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13788 val |= (DMA_RWCTRL_READ_BNDRY_16 |
13789 DMA_RWCTRL_WRITE_BNDRY_16);
13794 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13795 val |= (DMA_RWCTRL_READ_BNDRY_32 |
13796 DMA_RWCTRL_WRITE_BNDRY_32);
13801 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13802 val |= (DMA_RWCTRL_READ_BNDRY_64 |
13803 DMA_RWCTRL_WRITE_BNDRY_64);
13808 if (goal == BOUNDARY_SINGLE_CACHELINE) {
13809 val |= (DMA_RWCTRL_READ_BNDRY_128 |
13810 DMA_RWCTRL_WRITE_BNDRY_128);
13815 val |= (DMA_RWCTRL_READ_BNDRY_256 |
13816 DMA_RWCTRL_WRITE_BNDRY_256);
13819 val |= (DMA_RWCTRL_READ_BNDRY_512 |
13820 DMA_RWCTRL_WRITE_BNDRY_512);
13824 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
13825 DMA_RWCTRL_WRITE_BNDRY_1024);
13834 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
13836 struct tg3_internal_buffer_desc test_desc;
13837 u32 sram_dma_descs;
13840 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
13842 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
13843 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
13844 tw32(RDMAC_STATUS, 0);
13845 tw32(WDMAC_STATUS, 0);
13847 tw32(BUFMGR_MODE, 0);
13848 tw32(FTQ_RESET, 0);
13850 test_desc.addr_hi = ((u64) buf_dma) >> 32;
13851 test_desc.addr_lo = buf_dma & 0xffffffff;
13852 test_desc.nic_mbuf = 0x00002100;
13853 test_desc.len = size;
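/* The descriptor hands the chip a host buffer address plus an
 * on-chip SRAM mbuf offset (0x2100, the same area the verification
 * loop in tg3_test_dma() reads back); to_device presumably decides
 * whether the read-DMA or write-DMA engine processes it via the
 * cqid_sqid and FTQ choices below.
 */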
13856 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
13857 * the *second* time the tg3 driver was getting loaded after an initial scan.
13860 * Broadcom tells me:
13861 * ...the DMA engine is connected to the GRC block and a DMA
13862 * reset may affect the GRC block in some unpredictable way...
13863 * The behavior of resets to individual blocks has not been tested.
13865 * Broadcom noted the GRC reset will also reset all sub-components.
13868 test_desc.cqid_sqid = (13 << 8) | 2;
13870 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
13873 test_desc.cqid_sqid = (16 << 8) | 7;
13875 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
13878 test_desc.flags = 0x00000005;
13880 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
13883 val = *(((u32 *)&test_desc) + i);
13884 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
13885 sram_dma_descs + (i * sizeof(u32)));
13886 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
13888 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
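/* The descriptor has just been copied word by word into NIC SRAM
 * through the PCI memory window; queueing its SRAM address on the
 * high-priority read or write FTQ below is what actually starts
 * the transfer.
 */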
13891 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13893 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13896 for (i = 0; i < 40; i++) {
13900 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
13902 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
13903 if ((val & 0xffff) == sram_dma_descs) {
13914 #define TEST_BUFFER_SIZE 0x2000
13916 static int __devinit tg3_test_dma(struct tg3 *tp)
13918 dma_addr_t buf_dma;
13919 u32 *buf, saved_dma_rwctrl;
13922 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13928 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
13929 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
13931 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13933 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13938 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13939 /* DMA read watermark not used on PCIE */
13940 tp->dma_rwctrl |= 0x00180000;
13941 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13942 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13943 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13944 tp->dma_rwctrl |= 0x003f0000;
13946 tp->dma_rwctrl |= 0x003f000f;
13948 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13949 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13950 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13951 u32 read_water = 0x7;
13953 /* If the 5704 is behind the EPB bridge, we can
13954 * do the less restrictive ONE_DMA workaround for
13955 * better performance.
13957 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13958 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13959 tp->dma_rwctrl |= 0x8000;
13960 else if (ccval == 0x6 || ccval == 0x7)
13961 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13965 /* Set bit 23 to enable PCIX hw bug fix */
13967 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13968 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13970 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13971 /* 5780 always in PCIX mode */
13972 tp->dma_rwctrl |= 0x00144000;
13973 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13974 /* 5714 always in PCIX mode */
13975 tp->dma_rwctrl |= 0x00148000;
13977 tp->dma_rwctrl |= 0x001b000f;
13981 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13982 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13983 tp->dma_rwctrl &= 0xfffffff0;
13985 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13987 /* Remove this if it causes problems for some boards. */
13988 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13990 /* On 5700/5701 chips, we need to set this bit.
13991 * Otherwise the chip will issue cacheline transactions
13992 * to streamable DMA memory without all of the byte
13993 * enables turned on. This is an error on several
13994 * RISC PCI controllers, in particular sparc64.
13996 * On 5703/5704 chips, this bit has been reassigned
13997 * a different meaning. In particular, it is used
13998 * on those chips to enable a PCI-X workaround.
14000 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
14003 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14006 /* Unneeded, already done by tg3_get_invariants. */
14007 tg3_switch_clocks(tp);
14010 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14011 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
14014 /* It is best to perform DMA test with maximum write burst size
14015 * to expose the 5700/5701 write DMA bug.
14017 saved_dma_rwctrl = tp->dma_rwctrl;
14018 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14019 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
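/* Test flow from here on: fill the host buffer with a pattern, DMA
 * it into chip SRAM, verify it with tg3_read_mem(), DMA it back
 * out, and verify it again in host memory.  Corruption on the way
 * back first drops the write boundary to 16 bytes; if it is
 * already at 16, the test fails.
 */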
14024 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
14027 /* Send the buffer to the chip. */
14028 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14030 dev_err(&tp->pdev->dev,
14031 "%s: Buffer write failed. err = %d\n",
14037 /* validate data reached card RAM correctly. */
14038 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14040 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14041 if (le32_to_cpu(val) != p[i]) {
14042 dev_err(&tp->pdev->dev,
14043 "%s: Buffer corrupted on device! "
14044 "(%d != %d)\n", __func__, val, i);
14045 /* ret = -ENODEV here? */
14050 /* Now read it back. */
14051 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14053 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14054 "err = %d\n", __func__, ret);
14059 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
14063 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14064 DMA_RWCTRL_WRITE_BNDRY_16) {
14065 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14066 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14067 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14070 dev_err(&tp->pdev->dev,
14071 "%s: Buffer corrupted on read back! "
14072 "(%d != %d)\n", __func__, p[i], i);
14078 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
14084 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14085 DMA_RWCTRL_WRITE_BNDRY_16) {
14086 static struct pci_device_id dma_wait_state_chipsets[] = {
14087 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14088 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14092 /* DMA test passed without adjusting DMA boundary,
14093 * now look for chipsets that are known to expose the
14094 * DMA bug without failing the test.
14096 if (pci_dev_present(dma_wait_state_chipsets)) {
14097 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14098 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14100 /* Safe to use the calculated DMA boundary. */
14101 tp->dma_rwctrl = saved_dma_rwctrl;
14104 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14108 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
14113 static void __devinit tg3_init_link_config(struct tg3 *tp)
14115 tp->link_config.advertising =
14116 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
14117 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
14118 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
14119 ADVERTISED_Autoneg | ADVERTISED_MII);
14120 tp->link_config.speed = SPEED_INVALID;
14121 tp->link_config.duplex = DUPLEX_INVALID;
14122 tp->link_config.autoneg = AUTONEG_ENABLE;
14123 tp->link_config.active_speed = SPEED_INVALID;
14124 tp->link_config.active_duplex = DUPLEX_INVALID;
14125 tp->link_config.phy_is_low_power = 0;
14126 tp->link_config.orig_speed = SPEED_INVALID;
14127 tp->link_config.orig_duplex = DUPLEX_INVALID;
14128 tp->link_config.orig_autoneg = AUTONEG_INVALID;
14131 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14133 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14134 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14135 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14136 tp->bufmgr_config.mbuf_read_dma_low_water =
14137 DEFAULT_MB_RDMA_LOW_WATER_5705;
14138 tp->bufmgr_config.mbuf_mac_rx_low_water =
14139 DEFAULT_MB_MACRX_LOW_WATER_57765;
14140 tp->bufmgr_config.mbuf_high_water =
14141 DEFAULT_MB_HIGH_WATER_57765;
14143 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14144 DEFAULT_MB_RDMA_LOW_WATER_5705;
14145 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14146 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14147 tp->bufmgr_config.mbuf_high_water_jumbo =
14148 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14149 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14150 tp->bufmgr_config.mbuf_read_dma_low_water =
14151 DEFAULT_MB_RDMA_LOW_WATER_5705;
14152 tp->bufmgr_config.mbuf_mac_rx_low_water =
14153 DEFAULT_MB_MACRX_LOW_WATER_5705;
14154 tp->bufmgr_config.mbuf_high_water =
14155 DEFAULT_MB_HIGH_WATER_5705;
14156 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14157 tp->bufmgr_config.mbuf_mac_rx_low_water =
14158 DEFAULT_MB_MACRX_LOW_WATER_5906;
14159 tp->bufmgr_config.mbuf_high_water =
14160 DEFAULT_MB_HIGH_WATER_5906;
14163 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14164 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14165 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14166 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14167 tp->bufmgr_config.mbuf_high_water_jumbo =
14168 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14170 tp->bufmgr_config.mbuf_read_dma_low_water =
14171 DEFAULT_MB_RDMA_LOW_WATER;
14172 tp->bufmgr_config.mbuf_mac_rx_low_water =
14173 DEFAULT_MB_MACRX_LOW_WATER;
14174 tp->bufmgr_config.mbuf_high_water =
14175 DEFAULT_MB_HIGH_WATER;
14177 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14178 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14179 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14180 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14181 tp->bufmgr_config.mbuf_high_water_jumbo =
14182 DEFAULT_MB_HIGH_WATER_JUMBO;
14185 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14186 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14189 static char * __devinit tg3_phy_string(struct tg3 *tp)
14191 switch (tp->phy_id & TG3_PHY_ID_MASK) {
14192 case TG3_PHY_ID_BCM5400: return "5400";
14193 case TG3_PHY_ID_BCM5401: return "5401";
14194 case TG3_PHY_ID_BCM5411: return "5411";
14195 case TG3_PHY_ID_BCM5701: return "5701";
14196 case TG3_PHY_ID_BCM5703: return "5703";
14197 case TG3_PHY_ID_BCM5704: return "5704";
14198 case TG3_PHY_ID_BCM5705: return "5705";
14199 case TG3_PHY_ID_BCM5750: return "5750";
14200 case TG3_PHY_ID_BCM5752: return "5752";
14201 case TG3_PHY_ID_BCM5714: return "5714";
14202 case TG3_PHY_ID_BCM5780: return "5780";
14203 case TG3_PHY_ID_BCM5755: return "5755";
14204 case TG3_PHY_ID_BCM5787: return "5787";
14205 case TG3_PHY_ID_BCM5784: return "5784";
14206 case TG3_PHY_ID_BCM5756: return "5722/5756";
14207 case TG3_PHY_ID_BCM5906: return "5906";
14208 case TG3_PHY_ID_BCM5761: return "5761";
14209 case TG3_PHY_ID_BCM5718C: return "5718C";
14210 case TG3_PHY_ID_BCM5718S: return "5718S";
14211 case TG3_PHY_ID_BCM57765: return "57765";
14212 case TG3_PHY_ID_BCM5719C: return "5719C";
14213 case TG3_PHY_ID_BCM8002: return "8002/serdes";
14214 case 0: return "serdes";
14215 default: return "unknown";
14219 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14221 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
14222 strcpy(str, "PCI Express");
14224 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
14225 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14227 strcpy(str, "PCIX:");
14229 if ((clock_ctrl == 7) ||
14230 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14231 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14232 strcat(str, "133MHz");
14233 else if (clock_ctrl == 0)
14234 strcat(str, "33MHz");
14235 else if (clock_ctrl == 2)
14236 strcat(str, "50MHz");
14237 else if (clock_ctrl == 4)
14238 strcat(str, "66MHz");
14239 else if (clock_ctrl == 6)
14240 strcat(str, "100MHz");
14242 strcpy(str, "PCI:");
14243 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
14244 strcat(str, "66MHz");
14246 strcat(str, "33MHz");
14248 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
14249 strcat(str, ":32-bit");
14251 strcat(str, ":64-bit");
14255 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14257 struct pci_dev *peer;
14258 unsigned int func, devnr = tp->pdev->devfn & ~7;
14260 for (func = 0; func < 8; func++) {
14261 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14262 if (peer && peer != tp->pdev)
14266 /* 5704 can be configured in single-port mode, set peer to
14267 * tp->pdev in that case.
14275 * We don't need to keep the refcount elevated; there's no way
14276 * to remove one half of this device without removing the other
14283 static void __devinit tg3_init_coal(struct tg3 *tp)
14285 struct ethtool_coalesce *ec = &tp->coal;
14287 memset(ec, 0, sizeof(*ec));
14288 ec->cmd = ETHTOOL_GCOALESCE;
14289 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14290 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14291 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14292 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14293 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14294 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14295 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14296 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14297 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14299 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14300 HOSTCC_MODE_CLRTICK_TXBD)) {
14301 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14302 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14303 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14304 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14307 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
14308 ec->rx_coalesce_usecs_irq = 0;
14309 ec->tx_coalesce_usecs_irq = 0;
14310 ec->stats_block_coalesce_usecs = 0;
14314 static const struct net_device_ops tg3_netdev_ops = {
14315 .ndo_open = tg3_open,
14316 .ndo_stop = tg3_close,
14317 .ndo_start_xmit = tg3_start_xmit,
14318 .ndo_get_stats64 = tg3_get_stats64,
14319 .ndo_validate_addr = eth_validate_addr,
14320 .ndo_set_multicast_list = tg3_set_rx_mode,
14321 .ndo_set_mac_address = tg3_set_mac_addr,
14322 .ndo_do_ioctl = tg3_ioctl,
14323 .ndo_tx_timeout = tg3_tx_timeout,
14324 .ndo_change_mtu = tg3_change_mtu,
14325 #if TG3_VLAN_TAG_USED
14326 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14328 #ifdef CONFIG_NET_POLL_CONTROLLER
14329 .ndo_poll_controller = tg3_poll_controller,
14333 static const struct net_device_ops tg3_netdev_ops_dma_bug = {
14334 .ndo_open = tg3_open,
14335 .ndo_stop = tg3_close,
14336 .ndo_start_xmit = tg3_start_xmit_dma_bug,
14337 .ndo_get_stats64 = tg3_get_stats64,
14338 .ndo_validate_addr = eth_validate_addr,
14339 .ndo_set_multicast_list = tg3_set_rx_mode,
14340 .ndo_set_mac_address = tg3_set_mac_addr,
14341 .ndo_do_ioctl = tg3_ioctl,
14342 .ndo_tx_timeout = tg3_tx_timeout,
14343 .ndo_change_mtu = tg3_change_mtu,
14344 #if TG3_VLAN_TAG_USED
14345 .ndo_vlan_rx_register = tg3_vlan_rx_register,
14347 #ifdef CONFIG_NET_POLL_CONTROLLER
14348 .ndo_poll_controller = tg3_poll_controller,
14352 static int __devinit tg3_init_one(struct pci_dev *pdev,
14353 const struct pci_device_id *ent)
14355 struct net_device *dev;
14357 int i, err, pm_cap;
14358 u32 sndmbx, rcvmbx, intmbx;
14360 u64 dma_mask, persist_dma_mask;
14362 printk_once(KERN_INFO "%s\n", version);
14364 err = pci_enable_device(pdev);
14366 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14370 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14372 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14373 goto err_out_disable_pdev;
14376 pci_set_master(pdev);
14378 /* Find power-management capability. */
14379 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14381 dev_err(&pdev->dev,
14382 "Cannot find Power Management capability, aborting\n");
14384 goto err_out_free_res;
14387 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14389 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14391 goto err_out_free_res;
14394 SET_NETDEV_DEV(dev, &pdev->dev);
14396 #if TG3_VLAN_TAG_USED
14397 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
14400 tp = netdev_priv(dev);
14403 tp->pm_cap = pm_cap;
14404 tp->rx_mode = TG3_DEF_RX_MODE;
14405 tp->tx_mode = TG3_DEF_TX_MODE;
14408 tp->msg_enable = tg3_debug;
14410 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14412 /* The word/byte swap controls here control register access byte
14413 * swapping. DMA data byte swapping is controlled in the GRC_MODE setting below.
14416 tp->misc_host_ctrl =
14417 MISC_HOST_CTRL_MASK_PCI_INT |
14418 MISC_HOST_CTRL_WORD_SWAP |
14419 MISC_HOST_CTRL_INDIR_ACCESS |
14420 MISC_HOST_CTRL_PCISTATE_RW;
14422 /* The NONFRM (non-frame) byte/word swap controls take effect
14423 * on descriptor entries, anything which isn't packet data.
14425 * The StrongARM chips on the board (one for tx, one for rx)
14426 * are running in big-endian mode.
14428 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14429 GRC_MODE_WSWAP_NONFRM_DATA);
14430 #ifdef __BIG_ENDIAN
14431 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14433 spin_lock_init(&tp->lock);
14434 spin_lock_init(&tp->indirect_lock);
14435 INIT_WORK(&tp->reset_task, tg3_reset_task);
14437 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14439 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14441 goto err_out_free_dev;
14444 tg3_init_link_config(tp);
14446 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
14447 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
14449 dev->ethtool_ops = &tg3_ethtool_ops;
14450 dev->watchdog_timeo = TG3_TX_TIMEOUT;
14451 dev->irq = pdev->irq;
14453 err = tg3_get_invariants(tp);
14455 dev_err(&pdev->dev,
14456 "Problem fetching invariants of chip, aborting\n");
14457 goto err_out_iounmap;
14460 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
14461 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0 &&
14462 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
14463 dev->netdev_ops = &tg3_netdev_ops;
14465 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
14468 /* The EPB bridge inside 5714, 5715, and 5780 and any
14469 * device behind the EPB cannot support DMA addresses > 40-bit.
14470 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
14471 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
14472 * do DMA address check in tg3_start_xmit().
14474 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
14475 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
14476 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
14477 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
14478 #ifdef CONFIG_HIGHMEM
14479 dma_mask = DMA_BIT_MASK(64);
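/* With CONFIG_HIGHMEM the streaming dma_mask is widened to 64-bit
 * while persist_dma_mask (used for the coherent descriptor rings)
 * stays at 40-bit; addresses the hardware cannot reach are caught
 * by the tg3_start_xmit() check mentioned in the comment above.
 */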
14482 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
14484 /* Configure DMA attributes. */
14485 if (dma_mask > DMA_BIT_MASK(32)) {
14486 err = pci_set_dma_mask(pdev, dma_mask);
14488 dev->features |= NETIF_F_HIGHDMA;
14489 err = pci_set_consistent_dma_mask(pdev,
14492 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14493 "DMA for consistent allocations\n");
14494 goto err_out_iounmap;
14498 if (err || dma_mask == DMA_BIT_MASK(32)) {
14499 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14501 dev_err(&pdev->dev,
14502 "No usable DMA configuration, aborting\n");
14503 goto err_out_iounmap;
14507 tg3_init_bufmgr_config(tp);
14509 /* Selectively allow TSO based on operating conditions */
14510 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14511 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14512 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14514 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14515 tp->fw_needed = NULL;
14518 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14519 tp->fw_needed = FIRMWARE_TG3;
14521 /* TSO is on by default on chips that support hardware TSO.
14522 * Firmware TSO on older chips gives lower performance, so it
14523 * is off by default, but can be enabled using ethtool.
14525 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14526 (dev->features & NETIF_F_IP_CSUM)) {
14527 dev->features |= NETIF_F_TSO;
14528 vlan_features_add(dev, NETIF_F_TSO);
14530 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14531 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14532 if (dev->features & NETIF_F_IPV6_CSUM) {
14533 dev->features |= NETIF_F_TSO6;
14534 vlan_features_add(dev, NETIF_F_TSO6);
14536 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14537 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14538 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14539 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14540 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14541 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14542 dev->features |= NETIF_F_TSO_ECN;
14543 vlan_features_add(dev, NETIF_F_TSO_ECN);
14547 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14548 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14549 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
14550 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
14551 tp->rx_pending = 63;
14554 err = tg3_get_device_address(tp);
14556 dev_err(&pdev->dev,
14557 "Could not obtain valid ethernet address, aborting\n");
14558 goto err_out_iounmap;
14561 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14562 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14563 if (!tp->aperegs) {
14564 dev_err(&pdev->dev,
14565 "Cannot map APE registers, aborting\n");
14567 goto err_out_iounmap;
14570 tg3_ape_lock_init(tp);
14572 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
14573 tg3_read_dash_ver(tp);
14577 * Reset the chip in case a UNDI or EFI driver did not shut it down
14578 * cleanly; otherwise the DMA self test will enable WDMAC and we'll
14579 * see (spurious) pending DMA on the PCI bus at that point.
14581 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
14582 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
14583 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
14584 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14587 err = tg3_test_dma(tp);
14589 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14590 goto err_out_apeunmap;
14593 /* flow control autonegotiation is default behavior */
14594 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14595 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14597 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14598 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14599 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14600 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14601 struct tg3_napi *tnapi = &tp->napi[i];
14604 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14606 tnapi->int_mbox = intmbx;
14612 tnapi->consmbox = rcvmbx;
14613 tnapi->prodmbox = sndmbx;
14616 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14617 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14619 tnapi->coal_now = HOSTCC_MODE_NOW;
14620 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14623 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14627 * If we support MSIX, we'll be using RSS. If we're using
14628 * RSS, the first vector only handles link interrupts and the
14629 * remaining vectors handle rx and tx interrupts. Reuse the
14630 * mailbox values for the next iteration. The values we setup
14631 * above are still useful for the single vectored mode.
14646 pci_set_drvdata(pdev, dev);
14648 err = register_netdev(dev);
14650 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14651 goto err_out_apeunmap;
14654 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
14655 tp->board_part_number,
14656 tp->pci_chip_rev_id,
14657 tg3_bus_string(tp, str),
14660 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14661 struct phy_device *phydev;
14662 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14664 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14665 phydev->drv->name, dev_name(&phydev->dev));
14667 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14668 "(WireSpeed[%d])\n", tg3_phy_string(tp),
14669 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14670 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14671 "10/100/1000Base-T")),
14672 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
14674 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
14675 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
14676 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
14677 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
14678 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
14679 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
14680 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
14682 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
14683 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
14689 iounmap(tp->aperegs);
14690 tp->aperegs = NULL;
14703 pci_release_regions(pdev);
14705 err_out_disable_pdev:
14706 pci_disable_device(pdev);
14707 pci_set_drvdata(pdev, NULL);
14711 static void __devexit tg3_remove_one(struct pci_dev *pdev)
14713 struct net_device *dev = pci_get_drvdata(pdev);
14716 struct tg3 *tp = netdev_priv(dev);
14719 release_firmware(tp->fw);
14721 flush_scheduled_work();
14723 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14728 unregister_netdev(dev);
14730 iounmap(tp->aperegs);
14731 tp->aperegs = NULL;
14738 pci_release_regions(pdev);
14739 pci_disable_device(pdev);
14740 pci_set_drvdata(pdev, NULL);
14744 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
14746 struct net_device *dev = pci_get_drvdata(pdev);
14747 struct tg3 *tp = netdev_priv(dev);
14748 pci_power_t target_state;
14751 /* PCI register 4 needs to be saved whether netif_running() or not.
14752 * MSI address and data need to be saved if using MSI and netif_running().
14755 pci_save_state(pdev);
14757 if (!netif_running(dev))
14760 flush_scheduled_work();
14762 tg3_netif_stop(tp);
14764 del_timer_sync(&tp->timer);
14766 tg3_full_lock(tp, 1);
14767 tg3_disable_ints(tp);
14768 tg3_full_unlock(tp);
14770 netif_device_detach(dev);
14772 tg3_full_lock(tp, 0);
14773 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14774 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
14775 tg3_full_unlock(tp);
14777 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
14779 err = tg3_set_power_state(tp, target_state);
14783 tg3_full_lock(tp, 0);
14785 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14786 err2 = tg3_restart_hw(tp, 1);
14790 tp->timer.expires = jiffies + tp->timer_offset;
14791 add_timer(&tp->timer);
14793 netif_device_attach(dev);
14794 tg3_netif_start(tp);
14797 tg3_full_unlock(tp);
14806 static int tg3_resume(struct pci_dev *pdev)
14808 struct net_device *dev = pci_get_drvdata(pdev);
14809 struct tg3 *tp = netdev_priv(dev);
14812 pci_restore_state(tp->pdev);
14814 if (!netif_running(dev))
14817 err = tg3_set_power_state(tp, PCI_D0);
14821 netif_device_attach(dev);
14823 tg3_full_lock(tp, 0);
14825 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
14826 err = tg3_restart_hw(tp, 1);
14830 tp->timer.expires = jiffies + tp->timer_offset;
14831 add_timer(&tp->timer);
14833 tg3_netif_start(tp);
14836 tg3_full_unlock(tp);
14844 static struct pci_driver tg3_driver = {
14845 .name = DRV_MODULE_NAME,
14846 .id_table = tg3_pci_tbl,
14847 .probe = tg3_init_one,
14848 .remove = __devexit_p(tg3_remove_one),
14849 .suspend = tg3_suspend,
14850 .resume = tg3_resume
14853 static int __init tg3_init(void)
14855 return pci_register_driver(&tg3_driver);
14858 static void __exit tg3_cleanup(void)
14860 pci_unregister_driver(&tg3_driver);
14863 module_init(tg3_init);
14864 module_exit(tg3_cleanup);