2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2012 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #if IS_ENABLED(CONFIG_HWMON)
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
52 #include <net/checksum.h>
56 #include <asm/byteorder.h>
57 #include <linux/uaccess.h>
60 #include <asm/idprom.h>
69 /* Functions & macros to verify TG3_FLAGS types */
71 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
73 return test_bit(flag, bits);
76 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
81 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
83 clear_bit(flag, bits);
86 #define tg3_flag(tp, flag) \
87 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
88 #define tg3_flag_set(tp, flag) \
89 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_clear(tp, flag) \
91 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
93 #define DRV_MODULE_NAME "tg3"
95 #define TG3_MIN_NUM 123
96 #define DRV_MODULE_VERSION \
97 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
98 #define DRV_MODULE_RELDATE "March 21, 2012"
100 #define RESET_KIND_SHUTDOWN 0
101 #define RESET_KIND_INIT 1
102 #define RESET_KIND_SUSPEND 2
104 #define TG3_DEF_RX_MODE 0
105 #define TG3_DEF_TX_MODE 0
106 #define TG3_DEF_MSG_ENABLE \
116 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
118 /* length of time before we decide the hardware is borked,
119 * and dev->tx_timeout() should be called to fix the problem
122 #define TG3_TX_TIMEOUT (5 * HZ)
124 /* hardware minimum and maximum for a single frame's data payload */
125 #define TG3_MIN_MTU 60
126 #define TG3_MAX_MTU(tp) \
127 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
129 /* These numbers seem to be hard coded in the NIC firmware somehow.
130 * You can't change the ring sizes, but you can change where you place
131 * them in the NIC onboard memory.
133 #define TG3_RX_STD_RING_SIZE(tp) \
134 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
135 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
136 #define TG3_DEF_RX_RING_PENDING 200
137 #define TG3_RX_JMB_RING_SIZE(tp) \
138 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
139 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
140 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
142 /* Do not place this n-ring entries value into the tp struct itself,
143 * we really want to expose these constants to GCC so that modulo et
144 * al. operations are done with shifts and masks instead of with
145 * hw multiply/modulo instructions. Another solution would be to
146 * replace things like '% foo' with '& (foo - 1)'.
149 #define TG3_TX_RING_SIZE 512
150 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
152 #define TG3_RX_STD_RING_BYTES(tp) \
153 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
154 #define TG3_RX_JMB_RING_BYTES(tp) \
155 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
156 #define TG3_RX_RCB_RING_BYTES(tp) \
157 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
158 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
160 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
162 #define TG3_DMA_BYTE_ENAB 64
164 #define TG3_RX_STD_DMA_SZ 1536
165 #define TG3_RX_JMB_DMA_SZ 9046
167 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
169 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
170 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
172 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
173 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
175 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
176 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
178 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
179 * that are at least dword aligned when used in PCIX mode. The driver
180 * works around this bug by double copying the packet. This workaround
181 * is built into the normal double copy length check for efficiency.
183 * However, the double copy is only necessary on those architectures
184 * where unaligned memory accesses are inefficient. For those architectures
185 * where unaligned memory accesses incur little penalty, we can reintegrate
186 * the 5701 in the normal rx path. Doing so saves a device structure
187 * dereference by hardcoding the double copy threshold in place.
189 #define TG3_RX_COPY_THRESHOLD 256
190 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
191 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
193 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
196 #if (NET_IP_ALIGN != 0)
197 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
199 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
202 /* minimum number of free TX descriptors required to wake up TX process */
203 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
204 #define TG3_TX_BD_DMA_MAX_2K 2048
205 #define TG3_TX_BD_DMA_MAX_4K 4096
207 #define TG3_RAW_IP_ALIGN 2
209 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
210 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
212 #define FIRMWARE_TG3 "tigon/tg3.bin"
213 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
214 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
216 static char version[] __devinitdata =
217 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
219 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
220 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
221 MODULE_LICENSE("GPL");
222 MODULE_VERSION(DRV_MODULE_VERSION);
223 MODULE_FIRMWARE(FIRMWARE_TG3);
224 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
225 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
227 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
228 module_param(tg3_debug, int, 0);
229 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
231 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
306 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
307 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
308 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
309 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
310 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
311 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
312 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
313 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
317 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
319 static const struct {
320 const char string[ETH_GSTRING_LEN];
321 } ethtool_stats_keys[] = {
324 { "rx_ucast_packets" },
325 { "rx_mcast_packets" },
326 { "rx_bcast_packets" },
328 { "rx_align_errors" },
329 { "rx_xon_pause_rcvd" },
330 { "rx_xoff_pause_rcvd" },
331 { "rx_mac_ctrl_rcvd" },
332 { "rx_xoff_entered" },
333 { "rx_frame_too_long_errors" },
335 { "rx_undersize_packets" },
336 { "rx_in_length_errors" },
337 { "rx_out_length_errors" },
338 { "rx_64_or_less_octet_packets" },
339 { "rx_65_to_127_octet_packets" },
340 { "rx_128_to_255_octet_packets" },
341 { "rx_256_to_511_octet_packets" },
342 { "rx_512_to_1023_octet_packets" },
343 { "rx_1024_to_1522_octet_packets" },
344 { "rx_1523_to_2047_octet_packets" },
345 { "rx_2048_to_4095_octet_packets" },
346 { "rx_4096_to_8191_octet_packets" },
347 { "rx_8192_to_9022_octet_packets" },
354 { "tx_flow_control" },
356 { "tx_single_collisions" },
357 { "tx_mult_collisions" },
359 { "tx_excessive_collisions" },
360 { "tx_late_collisions" },
361 { "tx_collide_2times" },
362 { "tx_collide_3times" },
363 { "tx_collide_4times" },
364 { "tx_collide_5times" },
365 { "tx_collide_6times" },
366 { "tx_collide_7times" },
367 { "tx_collide_8times" },
368 { "tx_collide_9times" },
369 { "tx_collide_10times" },
370 { "tx_collide_11times" },
371 { "tx_collide_12times" },
372 { "tx_collide_13times" },
373 { "tx_collide_14times" },
374 { "tx_collide_15times" },
375 { "tx_ucast_packets" },
376 { "tx_mcast_packets" },
377 { "tx_bcast_packets" },
378 { "tx_carrier_sense_errors" },
382 { "dma_writeq_full" },
383 { "dma_write_prioq_full" },
387 { "rx_threshold_hit" },
389 { "dma_readq_full" },
390 { "dma_read_prioq_full" },
391 { "tx_comp_queue_full" },
393 { "ring_set_send_prod_index" },
394 { "ring_status_update" },
396 { "nic_avoided_irqs" },
397 { "nic_tx_threshold_hit" },
399 { "mbuf_lwm_thresh_hit" },
402 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
405 static const struct {
406 const char string[ETH_GSTRING_LEN];
407 } ethtool_test_keys[] = {
408 { "nvram test (online) " },
409 { "link test (online) " },
410 { "register test (offline)" },
411 { "memory test (offline)" },
412 { "mac loopback test (offline)" },
413 { "phy loopback test (offline)" },
414 { "ext loopback test (offline)" },
415 { "interrupt test (offline)" },
418 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
421 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
423 writel(val, tp->regs + off);
426 static u32 tg3_read32(struct tg3 *tp, u32 off)
428 return readl(tp->regs + off);
431 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
433 writel(val, tp->aperegs + off);
436 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
438 return readl(tp->aperegs + off);
441 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
445 spin_lock_irqsave(&tp->indirect_lock, flags);
446 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
447 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
448 spin_unlock_irqrestore(&tp->indirect_lock, flags);
451 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
453 writel(val, tp->regs + off);
454 readl(tp->regs + off);
457 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
462 spin_lock_irqsave(&tp->indirect_lock, flags);
463 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
464 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
465 spin_unlock_irqrestore(&tp->indirect_lock, flags);
469 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
473 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
474 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
475 TG3_64BIT_REG_LOW, val);
478 if (off == TG3_RX_STD_PROD_IDX_REG) {
479 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
480 TG3_64BIT_REG_LOW, val);
484 spin_lock_irqsave(&tp->indirect_lock, flags);
485 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
486 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
487 spin_unlock_irqrestore(&tp->indirect_lock, flags);
489 /* In indirect mode when disabling interrupts, we also need
490 * to clear the interrupt bit in the GRC local ctrl register.
492 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
494 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
495 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
499 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 /* usec_wait specifies the wait time in usec when writing to certain registers
512 * where it is unsafe to read back the register without some delay.
513 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
514 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
516 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
518 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
519 /* Non-posted methods */
520 tp->write32(tp, off, val);
523 tg3_write32(tp, off, val);
528 /* Wait again after the read for the posted method to guarantee that
529 * the wait time is met.
535 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
537 tp->write32_mbox(tp, off, val);
538 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
539 tp->read32_mbox(tp, off);
542 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
544 void __iomem *mbox = tp->regs + off;
546 if (tg3_flag(tp, TXD_MBOX_HWBUG))
548 if (tg3_flag(tp, MBOX_WRITE_REORDER))
552 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
554 return readl(tp->regs + off + GRCMBOX_BASE);
557 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
559 writel(val, tp->regs + off + GRCMBOX_BASE);
562 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
563 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
564 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
565 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
566 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
568 #define tw32(reg, val) tp->write32(tp, reg, val)
569 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
570 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
571 #define tr32(reg) tp->read32(tp, reg)
573 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
577 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
578 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
581 spin_lock_irqsave(&tp->indirect_lock, flags);
582 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
583 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
584 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
586 /* Always leave this as zero. */
587 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
589 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
590 tw32_f(TG3PCI_MEM_WIN_DATA, val);
592 /* Always leave this as zero. */
593 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
595 spin_unlock_irqrestore(&tp->indirect_lock, flags);
598 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
603 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
608 spin_lock_irqsave(&tp->indirect_lock, flags);
609 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
610 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
611 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
613 /* Always leave this as zero. */
614 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
616 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
617 *val = tr32(TG3PCI_MEM_WIN_DATA);
619 /* Always leave this as zero. */
620 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
622 spin_unlock_irqrestore(&tp->indirect_lock, flags);
625 static void tg3_ape_lock_init(struct tg3 *tp)
630 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
631 regbase = TG3_APE_LOCK_GRANT;
633 regbase = TG3_APE_PER_LOCK_GRANT;
635 /* Make sure the driver hasn't any stale locks. */
636 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
638 case TG3_APE_LOCK_PHY0:
639 case TG3_APE_LOCK_PHY1:
640 case TG3_APE_LOCK_PHY2:
641 case TG3_APE_LOCK_PHY3:
642 bit = APE_LOCK_GRANT_DRIVER;
646 bit = APE_LOCK_GRANT_DRIVER;
648 bit = 1 << tp->pci_fn;
650 tg3_ape_write32(tp, regbase + 4 * i, bit);
655 static int tg3_ape_lock(struct tg3 *tp, int locknum)
659 u32 status, req, gnt, bit;
661 if (!tg3_flag(tp, ENABLE_APE))
665 case TG3_APE_LOCK_GPIO:
666 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
668 case TG3_APE_LOCK_GRC:
669 case TG3_APE_LOCK_MEM:
671 bit = APE_LOCK_REQ_DRIVER;
673 bit = 1 << tp->pci_fn;
679 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
680 req = TG3_APE_LOCK_REQ;
681 gnt = TG3_APE_LOCK_GRANT;
683 req = TG3_APE_PER_LOCK_REQ;
684 gnt = TG3_APE_PER_LOCK_GRANT;
689 tg3_ape_write32(tp, req + off, bit);
691 /* Wait for up to 1 millisecond to acquire lock. */
692 for (i = 0; i < 100; i++) {
693 status = tg3_ape_read32(tp, gnt + off);
700 /* Revoke the lock request. */
701 tg3_ape_write32(tp, gnt + off, bit);
708 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
712 if (!tg3_flag(tp, ENABLE_APE))
716 case TG3_APE_LOCK_GPIO:
717 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
719 case TG3_APE_LOCK_GRC:
720 case TG3_APE_LOCK_MEM:
722 bit = APE_LOCK_GRANT_DRIVER;
724 bit = 1 << tp->pci_fn;
730 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
731 gnt = TG3_APE_LOCK_GRANT;
733 gnt = TG3_APE_PER_LOCK_GRANT;
735 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
738 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
743 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
746 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
747 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
750 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
753 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
756 return timeout_us ? 0 : -EBUSY;
759 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
763 for (i = 0; i < timeout_us / 10; i++) {
764 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
766 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
772 return i == timeout_us / 10;
775 int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off, u32 len)
778 u32 i, bufoff, msgoff, maxlen, apedata;
780 if (!tg3_flag(tp, APE_HAS_NCSI))
783 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
784 if (apedata != APE_SEG_SIG_MAGIC)
787 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
788 if (!(apedata & APE_FW_STATUS_READY))
791 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
793 msgoff = bufoff + 2 * sizeof(u32);
794 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
799 /* Cap xfer sizes to scratchpad limits. */
800 length = (len > maxlen) ? maxlen : len;
803 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
804 if (!(apedata & APE_FW_STATUS_READY))
807 /* Wait for up to 1 msec for APE to service previous event. */
808 err = tg3_ape_event_lock(tp, 1000);
812 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
813 APE_EVENT_STATUS_SCRTCHPD_READ |
814 APE_EVENT_STATUS_EVENT_PENDING;
815 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
817 tg3_ape_write32(tp, bufoff, base_off);
818 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
820 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
821 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
825 if (tg3_ape_wait_for_event(tp, 30000))
828 for (i = 0; length; i += 4, length -= 4) {
829 u32 val = tg3_ape_read32(tp, msgoff + i);
830 memcpy(data, &val, sizeof(u32));
838 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
843 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
844 if (apedata != APE_SEG_SIG_MAGIC)
847 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
848 if (!(apedata & APE_FW_STATUS_READY))
851 /* Wait for up to 1 millisecond for APE to service previous event. */
852 err = tg3_ape_event_lock(tp, 1000);
856 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
857 event | APE_EVENT_STATUS_EVENT_PENDING);
859 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
860 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
865 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
870 if (!tg3_flag(tp, ENABLE_APE))
874 case RESET_KIND_INIT:
875 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
876 APE_HOST_SEG_SIG_MAGIC);
877 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
878 APE_HOST_SEG_LEN_MAGIC);
879 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
880 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
881 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
882 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
883 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
884 APE_HOST_BEHAV_NO_PHYLOCK);
885 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
886 TG3_APE_HOST_DRVR_STATE_START);
888 event = APE_EVENT_STATUS_STATE_START;
890 case RESET_KIND_SHUTDOWN:
891 /* With the interface we are currently using,
892 * APE does not track driver state. Wiping
893 * out the HOST SEGMENT SIGNATURE forces
894 * the APE to assume OS absent status.
896 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
898 if (device_may_wakeup(&tp->pdev->dev) &&
899 tg3_flag(tp, WOL_ENABLE)) {
900 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
901 TG3_APE_HOST_WOL_SPEED_AUTO);
902 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
904 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
906 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
908 event = APE_EVENT_STATUS_STATE_UNLOAD;
910 case RESET_KIND_SUSPEND:
911 event = APE_EVENT_STATUS_STATE_SUSPEND;
917 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
919 tg3_ape_send_event(tp, event);
922 static void tg3_disable_ints(struct tg3 *tp)
926 tw32(TG3PCI_MISC_HOST_CTRL,
927 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
928 for (i = 0; i < tp->irq_max; i++)
929 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
932 static void tg3_enable_ints(struct tg3 *tp)
939 tw32(TG3PCI_MISC_HOST_CTRL,
940 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
942 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
943 for (i = 0; i < tp->irq_cnt; i++) {
944 struct tg3_napi *tnapi = &tp->napi[i];
946 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
947 if (tg3_flag(tp, 1SHOT_MSI))
948 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
950 tp->coal_now |= tnapi->coal_now;
953 /* Force an initial interrupt */
954 if (!tg3_flag(tp, TAGGED_STATUS) &&
955 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
956 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
958 tw32(HOSTCC_MODE, tp->coal_now);
960 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
963 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
965 struct tg3 *tp = tnapi->tp;
966 struct tg3_hw_status *sblk = tnapi->hw_status;
967 unsigned int work_exists = 0;
969 /* check for phy events */
970 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
971 if (sblk->status & SD_STATUS_LINK_CHG)
975 /* check for TX work to do */
976 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
979 /* check for RX work to do */
980 if (tnapi->rx_rcb_prod_idx &&
981 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
988 * similar to tg3_enable_ints, but it accurately determines whether there
989 * is new work pending and can return without flushing the PIO write
990 * which reenables interrupts
992 static void tg3_int_reenable(struct tg3_napi *tnapi)
994 struct tg3 *tp = tnapi->tp;
996 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
999 /* When doing tagged status, this work check is unnecessary.
1000 * The last_tag we write above tells the chip which piece of
1001 * work we've completed.
1003 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1004 tw32(HOSTCC_MODE, tp->coalesce_mode |
1005 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1008 static void tg3_switch_clocks(struct tg3 *tp)
1011 u32 orig_clock_ctrl;
1013 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1016 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1018 orig_clock_ctrl = clock_ctrl;
1019 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1020 CLOCK_CTRL_CLKRUN_OENABLE |
1022 tp->pci_clock_ctrl = clock_ctrl;
1024 if (tg3_flag(tp, 5705_PLUS)) {
1025 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1026 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1027 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1029 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1030 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1032 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1034 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1035 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1038 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1041 #define PHY_BUSY_LOOPS 5000
1043 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1049 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1051 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1057 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1058 MI_COM_PHY_ADDR_MASK);
1059 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1060 MI_COM_REG_ADDR_MASK);
1061 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1063 tw32_f(MAC_MI_COM, frame_val);
1065 loops = PHY_BUSY_LOOPS;
1066 while (loops != 0) {
1068 frame_val = tr32(MAC_MI_COM);
1070 if ((frame_val & MI_COM_BUSY) == 0) {
1072 frame_val = tr32(MAC_MI_COM);
1080 *val = frame_val & MI_COM_DATA_MASK;
1084 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1085 tw32_f(MAC_MI_MODE, tp->mi_mode);
1092 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1098 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1099 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1102 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1104 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1108 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1109 MI_COM_PHY_ADDR_MASK);
1110 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1111 MI_COM_REG_ADDR_MASK);
1112 frame_val |= (val & MI_COM_DATA_MASK);
1113 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1115 tw32_f(MAC_MI_COM, frame_val);
1117 loops = PHY_BUSY_LOOPS;
1118 while (loops != 0) {
1120 frame_val = tr32(MAC_MI_COM);
1121 if ((frame_val & MI_COM_BUSY) == 0) {
1123 frame_val = tr32(MAC_MI_COM);
1133 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1134 tw32_f(MAC_MI_MODE, tp->mi_mode);
1141 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1145 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1149 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1153 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1154 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1158 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1164 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1168 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1172 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1176 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1177 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1181 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1187 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1191 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1193 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1198 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1202 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1204 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1209 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1213 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1214 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1215 MII_TG3_AUXCTL_SHDWSEL_MISC);
1217 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1222 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1224 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1225 set |= MII_TG3_AUXCTL_MISC_WREN;
1227 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1230 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1231 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1232 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1233 MII_TG3_AUXCTL_ACTL_TX_6DB)
1235 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1236 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1237 MII_TG3_AUXCTL_ACTL_TX_6DB);
1239 static int tg3_bmcr_reset(struct tg3 *tp)
1244 /* OK, reset it, and poll the BMCR_RESET bit until it
1245 * clears or we time out.
1247 phy_control = BMCR_RESET;
1248 err = tg3_writephy(tp, MII_BMCR, phy_control);
1254 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1258 if ((phy_control & BMCR_RESET) == 0) {
1270 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1272 struct tg3 *tp = bp->priv;
1275 spin_lock_bh(&tp->lock);
1277 if (tg3_readphy(tp, reg, &val))
1280 spin_unlock_bh(&tp->lock);
1285 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1287 struct tg3 *tp = bp->priv;
1290 spin_lock_bh(&tp->lock);
1292 if (tg3_writephy(tp, reg, val))
1295 spin_unlock_bh(&tp->lock);
1300 static int tg3_mdio_reset(struct mii_bus *bp)
1305 static void tg3_mdio_config_5785(struct tg3 *tp)
1308 struct phy_device *phydev;
1310 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1311 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1312 case PHY_ID_BCM50610:
1313 case PHY_ID_BCM50610M:
1314 val = MAC_PHYCFG2_50610_LED_MODES;
1316 case PHY_ID_BCMAC131:
1317 val = MAC_PHYCFG2_AC131_LED_MODES;
1319 case PHY_ID_RTL8211C:
1320 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1322 case PHY_ID_RTL8201E:
1323 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1329 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1330 tw32(MAC_PHYCFG2, val);
1332 val = tr32(MAC_PHYCFG1);
1333 val &= ~(MAC_PHYCFG1_RGMII_INT |
1334 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1335 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1336 tw32(MAC_PHYCFG1, val);
1341 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1342 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1343 MAC_PHYCFG2_FMODE_MASK_MASK |
1344 MAC_PHYCFG2_GMODE_MASK_MASK |
1345 MAC_PHYCFG2_ACT_MASK_MASK |
1346 MAC_PHYCFG2_QUAL_MASK_MASK |
1347 MAC_PHYCFG2_INBAND_ENABLE;
1349 tw32(MAC_PHYCFG2, val);
1351 val = tr32(MAC_PHYCFG1);
1352 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1353 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1354 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1355 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1356 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1357 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1358 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1360 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1361 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1362 tw32(MAC_PHYCFG1, val);
1364 val = tr32(MAC_EXT_RGMII_MODE);
1365 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1366 MAC_RGMII_MODE_RX_QUALITY |
1367 MAC_RGMII_MODE_RX_ACTIVITY |
1368 MAC_RGMII_MODE_RX_ENG_DET |
1369 MAC_RGMII_MODE_TX_ENABLE |
1370 MAC_RGMII_MODE_TX_LOWPWR |
1371 MAC_RGMII_MODE_TX_RESET);
1372 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1373 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1374 val |= MAC_RGMII_MODE_RX_INT_B |
1375 MAC_RGMII_MODE_RX_QUALITY |
1376 MAC_RGMII_MODE_RX_ACTIVITY |
1377 MAC_RGMII_MODE_RX_ENG_DET;
1378 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1379 val |= MAC_RGMII_MODE_TX_ENABLE |
1380 MAC_RGMII_MODE_TX_LOWPWR |
1381 MAC_RGMII_MODE_TX_RESET;
1383 tw32(MAC_EXT_RGMII_MODE, val);
1386 static void tg3_mdio_start(struct tg3 *tp)
1388 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1389 tw32_f(MAC_MI_MODE, tp->mi_mode);
1392 if (tg3_flag(tp, MDIOBUS_INITED) &&
1393 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1394 tg3_mdio_config_5785(tp);
1397 static int tg3_mdio_init(struct tg3 *tp)
1401 struct phy_device *phydev;
1403 if (tg3_flag(tp, 5717_PLUS)) {
1406 tp->phy_addr = tp->pci_fn + 1;
1408 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1409 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1411 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1412 TG3_CPMU_PHY_STRAP_IS_SERDES;
1416 tp->phy_addr = TG3_PHY_MII_ADDR;
1420 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1423 tp->mdio_bus = mdiobus_alloc();
1424 if (tp->mdio_bus == NULL)
1427 tp->mdio_bus->name = "tg3 mdio bus";
1428 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1429 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1430 tp->mdio_bus->priv = tp;
1431 tp->mdio_bus->parent = &tp->pdev->dev;
1432 tp->mdio_bus->read = &tg3_mdio_read;
1433 tp->mdio_bus->write = &tg3_mdio_write;
1434 tp->mdio_bus->reset = &tg3_mdio_reset;
1435 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1436 tp->mdio_bus->irq = &tp->mdio_irq[0];
1438 for (i = 0; i < PHY_MAX_ADDR; i++)
1439 tp->mdio_bus->irq[i] = PHY_POLL;
1441 /* The bus registration will look for all the PHYs on the mdio bus.
1442 * Unfortunately, it does not ensure the PHY is powered up before
1443 * accessing the PHY ID registers. A chip reset is the
1444 * quickest way to bring the device back to an operational state..
1446 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1449 i = mdiobus_register(tp->mdio_bus);
1451 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1452 mdiobus_free(tp->mdio_bus);
1456 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1458 if (!phydev || !phydev->drv) {
1459 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1460 mdiobus_unregister(tp->mdio_bus);
1461 mdiobus_free(tp->mdio_bus);
1465 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1466 case PHY_ID_BCM57780:
1467 phydev->interface = PHY_INTERFACE_MODE_GMII;
1468 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1470 case PHY_ID_BCM50610:
1471 case PHY_ID_BCM50610M:
1472 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1473 PHY_BRCM_RX_REFCLK_UNUSED |
1474 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1475 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1476 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1477 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1478 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1479 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1480 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1481 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1483 case PHY_ID_RTL8211C:
1484 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1486 case PHY_ID_RTL8201E:
1487 case PHY_ID_BCMAC131:
1488 phydev->interface = PHY_INTERFACE_MODE_MII;
1489 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1490 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1494 tg3_flag_set(tp, MDIOBUS_INITED);
1496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1497 tg3_mdio_config_5785(tp);
1502 static void tg3_mdio_fini(struct tg3 *tp)
1504 if (tg3_flag(tp, MDIOBUS_INITED)) {
1505 tg3_flag_clear(tp, MDIOBUS_INITED);
1506 mdiobus_unregister(tp->mdio_bus);
1507 mdiobus_free(tp->mdio_bus);
1511 /* tp->lock is held. */
1512 static inline void tg3_generate_fw_event(struct tg3 *tp)
1516 val = tr32(GRC_RX_CPU_EVENT);
1517 val |= GRC_RX_CPU_DRIVER_EVENT;
1518 tw32_f(GRC_RX_CPU_EVENT, val);
1520 tp->last_event_jiffies = jiffies;
1523 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1525 /* tp->lock is held. */
1526 static void tg3_wait_for_event_ack(struct tg3 *tp)
1529 unsigned int delay_cnt;
1532 /* If enough time has passed, no wait is necessary. */
1533 time_remain = (long)(tp->last_event_jiffies + 1 +
1534 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1536 if (time_remain < 0)
1539 /* Check if we can shorten the wait time. */
1540 delay_cnt = jiffies_to_usecs(time_remain);
1541 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1542 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1543 delay_cnt = (delay_cnt >> 3) + 1;
1545 for (i = 0; i < delay_cnt; i++) {
1546 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1552 /* tp->lock is held. */
1553 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1558 if (!tg3_readphy(tp, MII_BMCR, ®))
1560 if (!tg3_readphy(tp, MII_BMSR, ®))
1561 val |= (reg & 0xffff);
1565 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1567 if (!tg3_readphy(tp, MII_LPA, ®))
1568 val |= (reg & 0xffff);
1572 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1573 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1575 if (!tg3_readphy(tp, MII_STAT1000, ®))
1576 val |= (reg & 0xffff);
1580 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1587 /* tp->lock is held. */
1588 static void tg3_ump_link_report(struct tg3 *tp)
1592 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1595 tg3_phy_gather_ump_data(tp, data);
1597 tg3_wait_for_event_ack(tp);
1599 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1600 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1601 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1602 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1603 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1604 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1606 tg3_generate_fw_event(tp);
1609 /* tp->lock is held. */
1610 static void tg3_stop_fw(struct tg3 *tp)
1612 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1613 /* Wait for RX cpu to ACK the previous event. */
1614 tg3_wait_for_event_ack(tp);
1616 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1618 tg3_generate_fw_event(tp);
1620 /* Wait for RX cpu to ACK this event. */
1621 tg3_wait_for_event_ack(tp);
1625 /* tp->lock is held. */
1626 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1628 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1629 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1631 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1633 case RESET_KIND_INIT:
1634 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1638 case RESET_KIND_SHUTDOWN:
1639 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1643 case RESET_KIND_SUSPEND:
1644 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1653 if (kind == RESET_KIND_INIT ||
1654 kind == RESET_KIND_SUSPEND)
1655 tg3_ape_driver_state_change(tp, kind);
1658 /* tp->lock is held. */
1659 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1661 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1663 case RESET_KIND_INIT:
1664 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1665 DRV_STATE_START_DONE);
1668 case RESET_KIND_SHUTDOWN:
1669 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1670 DRV_STATE_UNLOAD_DONE);
1678 if (kind == RESET_KIND_SHUTDOWN)
1679 tg3_ape_driver_state_change(tp, kind);
1682 /* tp->lock is held. */
1683 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1685 if (tg3_flag(tp, ENABLE_ASF)) {
1687 case RESET_KIND_INIT:
1688 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1692 case RESET_KIND_SHUTDOWN:
1693 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1697 case RESET_KIND_SUSPEND:
1698 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1708 static int tg3_poll_fw(struct tg3 *tp)
1713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1714 /* Wait up to 20ms for init done. */
1715 for (i = 0; i < 200; i++) {
1716 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1723 /* Wait for firmware initialization to complete. */
1724 for (i = 0; i < 100000; i++) {
1725 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1726 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1731 /* Chip might not be fitted with firmware. Some Sun onboard
1732 * parts are configured like that. So don't signal the timeout
1733 * of the above loop as an error, but do report the lack of
1734 * running firmware once.
1736 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1737 tg3_flag_set(tp, NO_FWARE_REPORTED);
1739 netdev_info(tp->dev, "No firmware running\n");
1742 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1743 /* The 57765 A0 needs a little more
1744 * time to do some important work.
1752 static void tg3_link_report(struct tg3 *tp)
1754 if (!netif_carrier_ok(tp->dev)) {
1755 netif_info(tp, link, tp->dev, "Link is down\n");
1756 tg3_ump_link_report(tp);
1757 } else if (netif_msg_link(tp)) {
1758 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1759 (tp->link_config.active_speed == SPEED_1000 ?
1761 (tp->link_config.active_speed == SPEED_100 ?
1763 (tp->link_config.active_duplex == DUPLEX_FULL ?
1766 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1767 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1769 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1772 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1773 netdev_info(tp->dev, "EEE is %s\n",
1774 tp->setlpicnt ? "enabled" : "disabled");
1776 tg3_ump_link_report(tp);
1780 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1784 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1785 miireg = ADVERTISE_1000XPAUSE;
1786 else if (flow_ctrl & FLOW_CTRL_TX)
1787 miireg = ADVERTISE_1000XPSE_ASYM;
1788 else if (flow_ctrl & FLOW_CTRL_RX)
1789 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1796 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1800 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1801 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1802 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1803 if (lcladv & ADVERTISE_1000XPAUSE)
1805 if (rmtadv & ADVERTISE_1000XPAUSE)
1812 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1816 u32 old_rx_mode = tp->rx_mode;
1817 u32 old_tx_mode = tp->tx_mode;
1819 if (tg3_flag(tp, USE_PHYLIB))
1820 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1822 autoneg = tp->link_config.autoneg;
1824 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1825 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1826 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1828 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1830 flowctrl = tp->link_config.flowctrl;
1832 tp->link_config.active_flowctrl = flowctrl;
1834 if (flowctrl & FLOW_CTRL_RX)
1835 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1837 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1839 if (old_rx_mode != tp->rx_mode)
1840 tw32_f(MAC_RX_MODE, tp->rx_mode);
1842 if (flowctrl & FLOW_CTRL_TX)
1843 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1845 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1847 if (old_tx_mode != tp->tx_mode)
1848 tw32_f(MAC_TX_MODE, tp->tx_mode);
1851 static void tg3_adjust_link(struct net_device *dev)
1853 u8 oldflowctrl, linkmesg = 0;
1854 u32 mac_mode, lcl_adv, rmt_adv;
1855 struct tg3 *tp = netdev_priv(dev);
1856 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1858 spin_lock_bh(&tp->lock);
1860 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1861 MAC_MODE_HALF_DUPLEX);
1863 oldflowctrl = tp->link_config.active_flowctrl;
1869 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1870 mac_mode |= MAC_MODE_PORT_MODE_MII;
1871 else if (phydev->speed == SPEED_1000 ||
1872 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1873 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1875 mac_mode |= MAC_MODE_PORT_MODE_MII;
1877 if (phydev->duplex == DUPLEX_HALF)
1878 mac_mode |= MAC_MODE_HALF_DUPLEX;
1880 lcl_adv = mii_advertise_flowctrl(
1881 tp->link_config.flowctrl);
1884 rmt_adv = LPA_PAUSE_CAP;
1885 if (phydev->asym_pause)
1886 rmt_adv |= LPA_PAUSE_ASYM;
1889 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1891 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1893 if (mac_mode != tp->mac_mode) {
1894 tp->mac_mode = mac_mode;
1895 tw32_f(MAC_MODE, tp->mac_mode);
1899 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1900 if (phydev->speed == SPEED_10)
1902 MAC_MI_STAT_10MBPS_MODE |
1903 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1905 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1908 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1909 tw32(MAC_TX_LENGTHS,
1910 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1911 (6 << TX_LENGTHS_IPG_SHIFT) |
1912 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1914 tw32(MAC_TX_LENGTHS,
1915 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1916 (6 << TX_LENGTHS_IPG_SHIFT) |
1917 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1919 if (phydev->link != tp->old_link ||
1920 phydev->speed != tp->link_config.active_speed ||
1921 phydev->duplex != tp->link_config.active_duplex ||
1922 oldflowctrl != tp->link_config.active_flowctrl)
1925 tp->old_link = phydev->link;
1926 tp->link_config.active_speed = phydev->speed;
1927 tp->link_config.active_duplex = phydev->duplex;
1929 spin_unlock_bh(&tp->lock);
1932 tg3_link_report(tp);
1935 static int tg3_phy_init(struct tg3 *tp)
1937 struct phy_device *phydev;
1939 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1942 /* Bring the PHY back to a known state. */
1945 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1947 /* Attach the MAC to the PHY. */
1948 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1949 phydev->dev_flags, phydev->interface);
1950 if (IS_ERR(phydev)) {
1951 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1952 return PTR_ERR(phydev);
1955 /* Mask with MAC supported features. */
1956 switch (phydev->interface) {
1957 case PHY_INTERFACE_MODE_GMII:
1958 case PHY_INTERFACE_MODE_RGMII:
1959 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1960 phydev->supported &= (PHY_GBIT_FEATURES |
1962 SUPPORTED_Asym_Pause);
1966 case PHY_INTERFACE_MODE_MII:
1967 phydev->supported &= (PHY_BASIC_FEATURES |
1969 SUPPORTED_Asym_Pause);
1972 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1976 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1978 phydev->advertising = phydev->supported;
1983 static void tg3_phy_start(struct tg3 *tp)
1985 struct phy_device *phydev;
1987 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1990 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1992 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1993 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1994 phydev->speed = tp->link_config.speed;
1995 phydev->duplex = tp->link_config.duplex;
1996 phydev->autoneg = tp->link_config.autoneg;
1997 phydev->advertising = tp->link_config.advertising;
2002 phy_start_aneg(phydev);
2005 static void tg3_phy_stop(struct tg3 *tp)
2007 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2010 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2013 static void tg3_phy_fini(struct tg3 *tp)
2015 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2016 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2017 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2021 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2026 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2029 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2030 /* Cannot do read-modify-write on 5401 */
2031 err = tg3_phy_auxctl_write(tp,
2032 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2033 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2038 err = tg3_phy_auxctl_read(tp,
2039 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2043 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2044 err = tg3_phy_auxctl_write(tp,
2045 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2051 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2055 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2058 tg3_writephy(tp, MII_TG3_FET_TEST,
2059 phytest | MII_TG3_FET_SHADOW_EN);
2060 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2062 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2064 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2065 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2067 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2071 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2075 if (!tg3_flag(tp, 5705_PLUS) ||
2076 (tg3_flag(tp, 5717_PLUS) &&
2077 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2080 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2081 tg3_phy_fet_toggle_apd(tp, enable);
2085 reg = MII_TG3_MISC_SHDW_WREN |
2086 MII_TG3_MISC_SHDW_SCR5_SEL |
2087 MII_TG3_MISC_SHDW_SCR5_LPED |
2088 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2089 MII_TG3_MISC_SHDW_SCR5_SDTL |
2090 MII_TG3_MISC_SHDW_SCR5_C125OE;
2091 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2092 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2094 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2097 reg = MII_TG3_MISC_SHDW_WREN |
2098 MII_TG3_MISC_SHDW_APD_SEL |
2099 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2101 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2103 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2106 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2110 if (!tg3_flag(tp, 5705_PLUS) ||
2111 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2114 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2117 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2118 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2120 tg3_writephy(tp, MII_TG3_FET_TEST,
2121 ephy | MII_TG3_FET_SHADOW_EN);
2122 if (!tg3_readphy(tp, reg, &phy)) {
2124 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2126 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2127 tg3_writephy(tp, reg, phy);
2129 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2134 ret = tg3_phy_auxctl_read(tp,
2135 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2138 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2140 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2141 tg3_phy_auxctl_write(tp,
2142 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2147 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2152 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2155 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2157 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2158 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2161 static void tg3_phy_apply_otp(struct tg3 *tp)
2170 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2173 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2174 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2175 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2177 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2178 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2179 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2181 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2182 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2183 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2185 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2186 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2188 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2189 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2191 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2192 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2193 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2195 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2198 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2202 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2207 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2208 current_link_up == 1 &&
2209 tp->link_config.active_duplex == DUPLEX_FULL &&
2210 (tp->link_config.active_speed == SPEED_100 ||
2211 tp->link_config.active_speed == SPEED_1000)) {
2214 if (tp->link_config.active_speed == SPEED_1000)
2215 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2217 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2219 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2221 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2222 TG3_CL45_D7_EEERES_STAT, &val);
2224 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2225 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2229 if (!tp->setlpicnt) {
2230 if (current_link_up == 1 &&
2231 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2232 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2233 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2236 val = tr32(TG3_CPMU_EEE_MODE);
2237 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2241 static void tg3_phy_eee_enable(struct tg3 *tp)
2245 if (tp->link_config.active_speed == SPEED_1000 &&
2246 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2247 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2248 tg3_flag(tp, 57765_CLASS)) &&
2249 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2250 val = MII_TG3_DSP_TAP26_ALNOKO |
2251 MII_TG3_DSP_TAP26_RMRXSTO;
2252 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2253 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2256 val = tr32(TG3_CPMU_EEE_MODE);
2257 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2260 static int tg3_wait_macro_done(struct tg3 *tp)
2267 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2268 if ((tmp32 & 0x1000) == 0)
2278 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2280 static const u32 test_pat[4][6] = {
2281 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2282 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2283 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2284 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2288 for (chan = 0; chan < 4; chan++) {
2291 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2292 (chan * 0x2000) | 0x0200);
2293 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2295 for (i = 0; i < 6; i++)
2296 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2299 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2300 if (tg3_wait_macro_done(tp)) {
2305 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2306 (chan * 0x2000) | 0x0200);
2307 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2308 if (tg3_wait_macro_done(tp)) {
2313 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2314 if (tg3_wait_macro_done(tp)) {
2319 for (i = 0; i < 6; i += 2) {
2322 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2323 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2324 tg3_wait_macro_done(tp)) {
2330 if (low != test_pat[chan][i] ||
2331 high != test_pat[chan][i+1]) {
2332 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2333 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2334 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2344 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2348 for (chan = 0; chan < 4; chan++) {
2351 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2352 (chan * 0x2000) | 0x0200);
2353 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2354 for (i = 0; i < 6; i++)
2355 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2356 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2357 if (tg3_wait_macro_done(tp))
2364 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2366 u32 reg32, phy9_orig;
2367 int retries, do_phy_reset, err;
2373 err = tg3_bmcr_reset(tp);
2379 /* Disable transmitter and interrupt. */
2380 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2384 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2386 /* Set full-duplex, 1000 mbps. */
2387 tg3_writephy(tp, MII_BMCR,
2388 BMCR_FULLDPLX | BMCR_SPEED1000);
2390 /* Set to master mode. */
2391 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2394 tg3_writephy(tp, MII_CTRL1000,
2395 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2397 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2401 /* Block the PHY control access. */
2402 tg3_phydsp_write(tp, 0x8005, 0x0800);
2404 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2407 } while (--retries);
2409 err = tg3_phy_reset_chanpat(tp);
2413 tg3_phydsp_write(tp, 0x8005, 0x0000);
2415 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2416 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2418 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2420 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2422 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2424 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2431 /* This will reset the tigon3 PHY if there is no valid
2432 * link unless the FORCE argument is non-zero.
2434 static int tg3_phy_reset(struct tg3 *tp)
2439 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2440 val = tr32(GRC_MISC_CFG);
2441 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2444 err = tg3_readphy(tp, MII_BMSR, &val);
2445 err |= tg3_readphy(tp, MII_BMSR, &val);
2449 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2450 netif_carrier_off(tp->dev);
2451 tg3_link_report(tp);
2454 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2457 err = tg3_phy_reset_5703_4_5(tp);
2464 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2465 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2466 cpmuctrl = tr32(TG3_CPMU_CTRL);
2467 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2469 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2472 err = tg3_bmcr_reset(tp);
2476 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2477 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2478 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2480 tw32(TG3_CPMU_CTRL, cpmuctrl);
2483 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2484 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2485 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2486 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2487 CPMU_LSPD_1000MB_MACCLK_12_5) {
2488 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2490 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2494 if (tg3_flag(tp, 5717_PLUS) &&
2495 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2498 tg3_phy_apply_otp(tp);
2500 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2501 tg3_phy_toggle_apd(tp, true);
2503 tg3_phy_toggle_apd(tp, false);
2506 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2507 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2508 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2509 tg3_phydsp_write(tp, 0x000a, 0x0323);
2510 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2513 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2514 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2515 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2518 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2519 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2520 tg3_phydsp_write(tp, 0x000a, 0x310b);
2521 tg3_phydsp_write(tp, 0x201f, 0x9506);
2522 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2523 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2525 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2526 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2527 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2528 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2529 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2530 tg3_writephy(tp, MII_TG3_TEST1,
2531 MII_TG3_TEST1_TRIM_EN | 0x4);
2533 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2535 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2539 /* Set Extended packet length bit (bit 14) on all chips that */
2540 /* support jumbo frames */
2541 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2542 /* Cannot do read-modify-write on 5401 */
2543 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2544 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2545 /* Set bit 14 with read-modify-write to preserve other bits */
2546 err = tg3_phy_auxctl_read(tp,
2547 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2549 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2550 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2553 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2554 * jumbo frames transmission.
2556 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2557 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2558 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2559 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2563 /* adjust output voltage */
2564 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2567 tg3_phy_toggle_automdix(tp, 1);
2568 tg3_phy_set_wirespeed(tp);
2572 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2573 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2574 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2575 TG3_GPIO_MSG_NEED_VAUX)
2576 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2577 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2578 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2579 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2580 (TG3_GPIO_MSG_DRVR_PRES << 12))
2582 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2583 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2584 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2585 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2586 (TG3_GPIO_MSG_NEED_VAUX << 12))
2588 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2592 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2594 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2596 status = tr32(TG3_CPMU_DRV_STATUS);
2598 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2599 status &= ~(TG3_GPIO_MSG_MASK << shift);
2600 status |= (newstat << shift);
2602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2603 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2604 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2606 tw32(TG3_CPMU_DRV_STATUS, status);
2608 return status >> TG3_APE_GPIO_MSG_SHIFT;
2611 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2613 if (!tg3_flag(tp, IS_NIC))
2616 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2617 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2618 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2619 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2622 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2624 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2625 TG3_GRC_LCLCTL_PWRSW_DELAY);
2627 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2629 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2630 TG3_GRC_LCLCTL_PWRSW_DELAY);
2636 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2640 if (!tg3_flag(tp, IS_NIC) ||
2641 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2645 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2647 tw32_wait_f(GRC_LOCAL_CTRL,
2648 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2649 TG3_GRC_LCLCTL_PWRSW_DELAY);
2651 tw32_wait_f(GRC_LOCAL_CTRL,
2653 TG3_GRC_LCLCTL_PWRSW_DELAY);
2655 tw32_wait_f(GRC_LOCAL_CTRL,
2656 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2657 TG3_GRC_LCLCTL_PWRSW_DELAY);
2660 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2662 if (!tg3_flag(tp, IS_NIC))
2665 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2666 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2667 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2668 (GRC_LCLCTRL_GPIO_OE0 |
2669 GRC_LCLCTRL_GPIO_OE1 |
2670 GRC_LCLCTRL_GPIO_OE2 |
2671 GRC_LCLCTRL_GPIO_OUTPUT0 |
2672 GRC_LCLCTRL_GPIO_OUTPUT1),
2673 TG3_GRC_LCLCTL_PWRSW_DELAY);
2674 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2675 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2676 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2677 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2678 GRC_LCLCTRL_GPIO_OE1 |
2679 GRC_LCLCTRL_GPIO_OE2 |
2680 GRC_LCLCTRL_GPIO_OUTPUT0 |
2681 GRC_LCLCTRL_GPIO_OUTPUT1 |
2683 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2684 TG3_GRC_LCLCTL_PWRSW_DELAY);
2686 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2687 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2688 TG3_GRC_LCLCTL_PWRSW_DELAY);
2690 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2691 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2692 TG3_GRC_LCLCTL_PWRSW_DELAY);
2695 u32 grc_local_ctrl = 0;
2697 /* Workaround to prevent overdrawing Amps. */
2698 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2699 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2700 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2702 TG3_GRC_LCLCTL_PWRSW_DELAY);
2705 /* On 5753 and variants, GPIO2 cannot be used. */
2706 no_gpio2 = tp->nic_sram_data_cfg &
2707 NIC_SRAM_DATA_CFG_NO_GPIO2;
2709 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2710 GRC_LCLCTRL_GPIO_OE1 |
2711 GRC_LCLCTRL_GPIO_OE2 |
2712 GRC_LCLCTRL_GPIO_OUTPUT1 |
2713 GRC_LCLCTRL_GPIO_OUTPUT2;
2715 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2716 GRC_LCLCTRL_GPIO_OUTPUT2);
2718 tw32_wait_f(GRC_LOCAL_CTRL,
2719 tp->grc_local_ctrl | grc_local_ctrl,
2720 TG3_GRC_LCLCTL_PWRSW_DELAY);
2722 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2724 tw32_wait_f(GRC_LOCAL_CTRL,
2725 tp->grc_local_ctrl | grc_local_ctrl,
2726 TG3_GRC_LCLCTL_PWRSW_DELAY);
2729 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2730 tw32_wait_f(GRC_LOCAL_CTRL,
2731 tp->grc_local_ctrl | grc_local_ctrl,
2732 TG3_GRC_LCLCTL_PWRSW_DELAY);
2737 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2741 /* Serialize power state transitions */
2742 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2745 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2746 msg = TG3_GPIO_MSG_NEED_VAUX;
2748 msg = tg3_set_function_status(tp, msg);
2750 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2753 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2754 tg3_pwrsrc_switch_to_vaux(tp);
2756 tg3_pwrsrc_die_with_vmain(tp);
2759 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2762 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2764 bool need_vaux = false;
2766 /* The GPIOs do something completely different on 57765. */
2767 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2770 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2771 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2772 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2773 tg3_frob_aux_power_5717(tp, include_wol ?
2774 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2778 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2779 struct net_device *dev_peer;
2781 dev_peer = pci_get_drvdata(tp->pdev_peer);
2783 /* remove_one() may have been run on the peer. */
2785 struct tg3 *tp_peer = netdev_priv(dev_peer);
2787 if (tg3_flag(tp_peer, INIT_COMPLETE))
2790 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2791 tg3_flag(tp_peer, ENABLE_ASF))
2796 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2797 tg3_flag(tp, ENABLE_ASF))
2801 tg3_pwrsrc_switch_to_vaux(tp);
2803 tg3_pwrsrc_die_with_vmain(tp);
2806 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2808 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2810 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2811 if (speed != SPEED_10)
2813 } else if (speed == SPEED_10)
2819 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2823 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2824 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2825 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2826 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2829 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2830 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2831 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2836 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2838 val = tr32(GRC_MISC_CFG);
2839 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2842 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2844 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2847 tg3_writephy(tp, MII_ADVERTISE, 0);
2848 tg3_writephy(tp, MII_BMCR,
2849 BMCR_ANENABLE | BMCR_ANRESTART);
2851 tg3_writephy(tp, MII_TG3_FET_TEST,
2852 phytest | MII_TG3_FET_SHADOW_EN);
2853 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2854 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2856 MII_TG3_FET_SHDW_AUXMODE4,
2859 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2862 } else if (do_low_power) {
2863 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2864 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2866 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2867 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2868 MII_TG3_AUXCTL_PCTL_VREG_11V;
2869 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2872 /* The PHY should not be powered down on some chips because
2875 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2876 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2877 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2878 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2879 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2883 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2884 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2885 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2886 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2887 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2888 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2891 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2894 /* tp->lock is held. */
2895 static int tg3_nvram_lock(struct tg3 *tp)
2897 if (tg3_flag(tp, NVRAM)) {
2900 if (tp->nvram_lock_cnt == 0) {
2901 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2902 for (i = 0; i < 8000; i++) {
2903 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2908 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2912 tp->nvram_lock_cnt++;
2917 /* tp->lock is held. */
2918 static void tg3_nvram_unlock(struct tg3 *tp)
2920 if (tg3_flag(tp, NVRAM)) {
2921 if (tp->nvram_lock_cnt > 0)
2922 tp->nvram_lock_cnt--;
2923 if (tp->nvram_lock_cnt == 0)
2924 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2928 /* tp->lock is held. */
2929 static void tg3_enable_nvram_access(struct tg3 *tp)
2931 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2932 u32 nvaccess = tr32(NVRAM_ACCESS);
2934 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2938 /* tp->lock is held. */
2939 static void tg3_disable_nvram_access(struct tg3 *tp)
2941 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2942 u32 nvaccess = tr32(NVRAM_ACCESS);
2944 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2948 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2949 u32 offset, u32 *val)
2954 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2957 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2958 EEPROM_ADDR_DEVID_MASK |
2960 tw32(GRC_EEPROM_ADDR,
2962 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2963 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2964 EEPROM_ADDR_ADDR_MASK) |
2965 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2967 for (i = 0; i < 1000; i++) {
2968 tmp = tr32(GRC_EEPROM_ADDR);
2970 if (tmp & EEPROM_ADDR_COMPLETE)
2974 if (!(tmp & EEPROM_ADDR_COMPLETE))
2977 tmp = tr32(GRC_EEPROM_DATA);
2980 * The data will always be opposite the native endian
2981 * format. Perform a blind byteswap to compensate.
2988 #define NVRAM_CMD_TIMEOUT 10000
2990 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2994 tw32(NVRAM_CMD, nvram_cmd);
2995 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2997 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3003 if (i == NVRAM_CMD_TIMEOUT)
3009 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3011 if (tg3_flag(tp, NVRAM) &&
3012 tg3_flag(tp, NVRAM_BUFFERED) &&
3013 tg3_flag(tp, FLASH) &&
3014 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3015 (tp->nvram_jedecnum == JEDEC_ATMEL))
3017 addr = ((addr / tp->nvram_pagesize) <<
3018 ATMEL_AT45DB0X1B_PAGE_POS) +
3019 (addr % tp->nvram_pagesize);
3024 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3026 if (tg3_flag(tp, NVRAM) &&
3027 tg3_flag(tp, NVRAM_BUFFERED) &&
3028 tg3_flag(tp, FLASH) &&
3029 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3030 (tp->nvram_jedecnum == JEDEC_ATMEL))
3032 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3033 tp->nvram_pagesize) +
3034 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3039 /* NOTE: Data read in from NVRAM is byteswapped according to
3040 * the byteswapping settings for all other register accesses.
3041 * tg3 devices are BE devices, so on a BE machine, the data
3042 * returned will be exactly as it is seen in NVRAM. On a LE
3043 * machine, the 32-bit value will be byteswapped.
3045 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3049 if (!tg3_flag(tp, NVRAM))
3050 return tg3_nvram_read_using_eeprom(tp, offset, val);
3052 offset = tg3_nvram_phys_addr(tp, offset);
3054 if (offset > NVRAM_ADDR_MSK)
3057 ret = tg3_nvram_lock(tp);
3061 tg3_enable_nvram_access(tp);
3063 tw32(NVRAM_ADDR, offset);
3064 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3065 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3068 *val = tr32(NVRAM_RDDATA);
3070 tg3_disable_nvram_access(tp);
3072 tg3_nvram_unlock(tp);
3077 /* Ensures NVRAM data is in bytestream format. */
3078 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3081 int res = tg3_nvram_read(tp, offset, &v);
3083 *val = cpu_to_be32(v);
3087 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3088 u32 offset, u32 len, u8 *buf)
3093 for (i = 0; i < len; i += 4) {
3099 memcpy(&data, buf + i, 4);
3102 * The SEEPROM interface expects the data to always be opposite
3103 * the native endian format. We accomplish this by reversing
3104 * all the operations that would have been performed on the
3105 * data from a call to tg3_nvram_read_be32().
3107 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3109 val = tr32(GRC_EEPROM_ADDR);
3110 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3112 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3114 tw32(GRC_EEPROM_ADDR, val |
3115 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3116 (addr & EEPROM_ADDR_ADDR_MASK) |
3120 for (j = 0; j < 1000; j++) {
3121 val = tr32(GRC_EEPROM_ADDR);
3123 if (val & EEPROM_ADDR_COMPLETE)
3127 if (!(val & EEPROM_ADDR_COMPLETE)) {
3136 /* offset and length are dword aligned */
3137 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3141 u32 pagesize = tp->nvram_pagesize;
3142 u32 pagemask = pagesize - 1;
3146 tmp = kmalloc(pagesize, GFP_KERNEL);
3152 u32 phy_addr, page_off, size;
3154 phy_addr = offset & ~pagemask;
3156 for (j = 0; j < pagesize; j += 4) {
3157 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3158 (__be32 *) (tmp + j));
3165 page_off = offset & pagemask;
3172 memcpy(tmp + page_off, buf, size);
3174 offset = offset + (pagesize - page_off);
3176 tg3_enable_nvram_access(tp);
3179 * Before we can erase the flash page, we need
3180 * to issue a special "write enable" command.
3182 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3184 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3187 /* Erase the target page */
3188 tw32(NVRAM_ADDR, phy_addr);
3190 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3191 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3193 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3196 /* Issue another write enable to start the write. */
3197 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3199 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3202 for (j = 0; j < pagesize; j += 4) {
3205 data = *((__be32 *) (tmp + j));
3207 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3209 tw32(NVRAM_ADDR, phy_addr + j);
3211 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3215 nvram_cmd |= NVRAM_CMD_FIRST;
3216 else if (j == (pagesize - 4))
3217 nvram_cmd |= NVRAM_CMD_LAST;
3219 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3227 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3228 tg3_nvram_exec_cmd(tp, nvram_cmd);
3235 /* offset and length are dword aligned */
3236 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3241 for (i = 0; i < len; i += 4, offset += 4) {
3242 u32 page_off, phy_addr, nvram_cmd;
3245 memcpy(&data, buf + i, 4);
3246 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3248 page_off = offset % tp->nvram_pagesize;
3250 phy_addr = tg3_nvram_phys_addr(tp, offset);
3252 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3254 if (page_off == 0 || i == 0)
3255 nvram_cmd |= NVRAM_CMD_FIRST;
3256 if (page_off == (tp->nvram_pagesize - 4))
3257 nvram_cmd |= NVRAM_CMD_LAST;
3260 nvram_cmd |= NVRAM_CMD_LAST;
3262 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3263 !tg3_flag(tp, FLASH) ||
3264 !tg3_flag(tp, 57765_PLUS))
3265 tw32(NVRAM_ADDR, phy_addr);
3267 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3268 !tg3_flag(tp, 5755_PLUS) &&
3269 (tp->nvram_jedecnum == JEDEC_ST) &&
3270 (nvram_cmd & NVRAM_CMD_FIRST)) {
3273 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3274 ret = tg3_nvram_exec_cmd(tp, cmd);
3278 if (!tg3_flag(tp, FLASH)) {
3279 /* We always do complete word writes to eeprom. */
3280 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3283 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3290 /* offset and length are dword aligned */
3291 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3295 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3296 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3297 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3301 if (!tg3_flag(tp, NVRAM)) {
3302 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3306 ret = tg3_nvram_lock(tp);
3310 tg3_enable_nvram_access(tp);
3311 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3312 tw32(NVRAM_WRITE1, 0x406);
3314 grc_mode = tr32(GRC_MODE);
3315 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3317 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3318 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3321 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3325 grc_mode = tr32(GRC_MODE);
3326 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3328 tg3_disable_nvram_access(tp);
3329 tg3_nvram_unlock(tp);
3332 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3333 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3340 #define RX_CPU_SCRATCH_BASE 0x30000
3341 #define RX_CPU_SCRATCH_SIZE 0x04000
3342 #define TX_CPU_SCRATCH_BASE 0x34000
3343 #define TX_CPU_SCRATCH_SIZE 0x04000
3345 /* tp->lock is held. */
3346 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3350 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3352 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3353 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3355 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3358 if (offset == RX_CPU_BASE) {
3359 for (i = 0; i < 10000; i++) {
3360 tw32(offset + CPU_STATE, 0xffffffff);
3361 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3362 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3366 tw32(offset + CPU_STATE, 0xffffffff);
3367 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3370 for (i = 0; i < 10000; i++) {
3371 tw32(offset + CPU_STATE, 0xffffffff);
3372 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3373 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3379 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3380 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3384 /* Clear firmware's nvram arbitration. */
3385 if (tg3_flag(tp, NVRAM))
3386 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3391 unsigned int fw_base;
3392 unsigned int fw_len;
3393 const __be32 *fw_data;
3396 /* tp->lock is held. */
3397 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3398 u32 cpu_scratch_base, int cpu_scratch_size,
3399 struct fw_info *info)
3401 int err, lock_err, i;
3402 void (*write_op)(struct tg3 *, u32, u32);
3404 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3406 "%s: Trying to load TX cpu firmware which is 5705\n",
3411 if (tg3_flag(tp, 5705_PLUS))
3412 write_op = tg3_write_mem;
3414 write_op = tg3_write_indirect_reg32;
3416 /* It is possible that bootcode is still loading at this point.
3417 * Get the nvram lock first before halting the cpu.
3419 lock_err = tg3_nvram_lock(tp);
3420 err = tg3_halt_cpu(tp, cpu_base);
3422 tg3_nvram_unlock(tp);
3426 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3427 write_op(tp, cpu_scratch_base + i, 0);
3428 tw32(cpu_base + CPU_STATE, 0xffffffff);
3429 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3430 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3431 write_op(tp, (cpu_scratch_base +
3432 (info->fw_base & 0xffff) +
3434 be32_to_cpu(info->fw_data[i]));
3442 /* tp->lock is held. */
3443 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3445 struct fw_info info;
3446 const __be32 *fw_data;
3449 fw_data = (void *)tp->fw->data;
3451 /* Firmware blob starts with version numbers, followed by
3452 start address and length. We are setting complete length.
3453 length = end_address_of_bss - start_address_of_text.
3454 Remainder is the blob to be loaded contiguously
3455 from start address. */
3457 info.fw_base = be32_to_cpu(fw_data[1]);
3458 info.fw_len = tp->fw->size - 12;
3459 info.fw_data = &fw_data[3];
3461 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3462 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3467 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3468 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3473 /* Now startup only the RX cpu. */
3474 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3475 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3477 for (i = 0; i < 5; i++) {
3478 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3480 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3481 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3482 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3486 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3487 "should be %08x\n", __func__,
3488 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3491 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3492 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3497 /* tp->lock is held. */
3498 static int tg3_load_tso_firmware(struct tg3 *tp)
3500 struct fw_info info;
3501 const __be32 *fw_data;
3502 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3505 if (tg3_flag(tp, HW_TSO_1) ||
3506 tg3_flag(tp, HW_TSO_2) ||
3507 tg3_flag(tp, HW_TSO_3))
3510 fw_data = (void *)tp->fw->data;
3512 /* Firmware blob starts with version numbers, followed by
3513 start address and length. We are setting complete length.
3514 length = end_address_of_bss - start_address_of_text.
3515 Remainder is the blob to be loaded contiguously
3516 from start address. */
3518 info.fw_base = be32_to_cpu(fw_data[1]);
3519 cpu_scratch_size = tp->fw_len;
3520 info.fw_len = tp->fw->size - 12;
3521 info.fw_data = &fw_data[3];
3523 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3524 cpu_base = RX_CPU_BASE;
3525 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3527 cpu_base = TX_CPU_BASE;
3528 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3529 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3532 err = tg3_load_firmware_cpu(tp, cpu_base,
3533 cpu_scratch_base, cpu_scratch_size,
3538 /* Now startup the cpu. */
3539 tw32(cpu_base + CPU_STATE, 0xffffffff);
3540 tw32_f(cpu_base + CPU_PC, info.fw_base);
3542 for (i = 0; i < 5; i++) {
3543 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3545 tw32(cpu_base + CPU_STATE, 0xffffffff);
3546 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3547 tw32_f(cpu_base + CPU_PC, info.fw_base);
3552 "%s fails to set CPU PC, is %08x should be %08x\n",
3553 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3556 tw32(cpu_base + CPU_STATE, 0xffffffff);
3557 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3562 /* tp->lock is held. */
3563 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3565 u32 addr_high, addr_low;
3568 addr_high = ((tp->dev->dev_addr[0] << 8) |
3569 tp->dev->dev_addr[1]);
3570 addr_low = ((tp->dev->dev_addr[2] << 24) |
3571 (tp->dev->dev_addr[3] << 16) |
3572 (tp->dev->dev_addr[4] << 8) |
3573 (tp->dev->dev_addr[5] << 0));
3574 for (i = 0; i < 4; i++) {
3575 if (i == 1 && skip_mac_1)
3577 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3578 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3581 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3582 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3583 for (i = 0; i < 12; i++) {
3584 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3585 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3589 addr_high = (tp->dev->dev_addr[0] +
3590 tp->dev->dev_addr[1] +
3591 tp->dev->dev_addr[2] +
3592 tp->dev->dev_addr[3] +
3593 tp->dev->dev_addr[4] +
3594 tp->dev->dev_addr[5]) &
3595 TX_BACKOFF_SEED_MASK;
3596 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3599 static void tg3_enable_register_access(struct tg3 *tp)
3602 * Make sure register accesses (indirect or otherwise) will function
3605 pci_write_config_dword(tp->pdev,
3606 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3609 static int tg3_power_up(struct tg3 *tp)
3613 tg3_enable_register_access(tp);
3615 err = pci_set_power_state(tp->pdev, PCI_D0);
3617 /* Switch out of Vaux if it is a NIC */
3618 tg3_pwrsrc_switch_to_vmain(tp);
3620 netdev_err(tp->dev, "Transition to D0 failed\n");
3626 static int tg3_setup_phy(struct tg3 *, int);
3628 static int tg3_power_down_prepare(struct tg3 *tp)
3631 bool device_should_wake, do_low_power;
3633 tg3_enable_register_access(tp);
3635 /* Restore the CLKREQ setting. */
3636 if (tg3_flag(tp, CLKREQ_BUG)) {
3639 pci_read_config_word(tp->pdev,
3640 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3642 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3643 pci_write_config_word(tp->pdev,
3644 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3648 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3649 tw32(TG3PCI_MISC_HOST_CTRL,
3650 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3652 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3653 tg3_flag(tp, WOL_ENABLE);
3655 if (tg3_flag(tp, USE_PHYLIB)) {
3656 do_low_power = false;
3657 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3658 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3659 struct phy_device *phydev;
3660 u32 phyid, advertising;
3662 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3664 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3666 tp->link_config.speed = phydev->speed;
3667 tp->link_config.duplex = phydev->duplex;
3668 tp->link_config.autoneg = phydev->autoneg;
3669 tp->link_config.advertising = phydev->advertising;
3671 advertising = ADVERTISED_TP |
3673 ADVERTISED_Autoneg |
3674 ADVERTISED_10baseT_Half;
3676 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3677 if (tg3_flag(tp, WOL_SPEED_100MB))
3679 ADVERTISED_100baseT_Half |
3680 ADVERTISED_100baseT_Full |
3681 ADVERTISED_10baseT_Full;
3683 advertising |= ADVERTISED_10baseT_Full;
3686 phydev->advertising = advertising;
3688 phy_start_aneg(phydev);
3690 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3691 if (phyid != PHY_ID_BCMAC131) {
3692 phyid &= PHY_BCM_OUI_MASK;
3693 if (phyid == PHY_BCM_OUI_1 ||
3694 phyid == PHY_BCM_OUI_2 ||
3695 phyid == PHY_BCM_OUI_3)
3696 do_low_power = true;
3700 do_low_power = true;
3702 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3703 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3705 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3706 tg3_setup_phy(tp, 0);
3709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3712 val = tr32(GRC_VCPU_EXT_CTRL);
3713 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3714 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3718 for (i = 0; i < 200; i++) {
3719 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3720 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3725 if (tg3_flag(tp, WOL_CAP))
3726 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3727 WOL_DRV_STATE_SHUTDOWN |
3731 if (device_should_wake) {
3734 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3736 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3737 tg3_phy_auxctl_write(tp,
3738 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3739 MII_TG3_AUXCTL_PCTL_WOL_EN |
3740 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3741 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3745 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3746 mac_mode = MAC_MODE_PORT_MODE_GMII;
3748 mac_mode = MAC_MODE_PORT_MODE_MII;
3750 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3751 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3753 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3754 SPEED_100 : SPEED_10;
3755 if (tg3_5700_link_polarity(tp, speed))
3756 mac_mode |= MAC_MODE_LINK_POLARITY;
3758 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3761 mac_mode = MAC_MODE_PORT_MODE_TBI;
3764 if (!tg3_flag(tp, 5750_PLUS))
3765 tw32(MAC_LED_CTRL, tp->led_ctrl);
3767 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3768 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3769 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3770 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3772 if (tg3_flag(tp, ENABLE_APE))
3773 mac_mode |= MAC_MODE_APE_TX_EN |
3774 MAC_MODE_APE_RX_EN |
3775 MAC_MODE_TDE_ENABLE;
3777 tw32_f(MAC_MODE, mac_mode);
3780 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3784 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3785 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3786 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3789 base_val = tp->pci_clock_ctrl;
3790 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3791 CLOCK_CTRL_TXCLK_DISABLE);
3793 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3794 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3795 } else if (tg3_flag(tp, 5780_CLASS) ||
3796 tg3_flag(tp, CPMU_PRESENT) ||
3797 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3799 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3800 u32 newbits1, newbits2;
3802 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3803 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3804 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3805 CLOCK_CTRL_TXCLK_DISABLE |
3807 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3808 } else if (tg3_flag(tp, 5705_PLUS)) {
3809 newbits1 = CLOCK_CTRL_625_CORE;
3810 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3812 newbits1 = CLOCK_CTRL_ALTCLK;
3813 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3816 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3819 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3822 if (!tg3_flag(tp, 5705_PLUS)) {
3825 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3826 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3827 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3828 CLOCK_CTRL_TXCLK_DISABLE |
3829 CLOCK_CTRL_44MHZ_CORE);
3831 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3834 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3835 tp->pci_clock_ctrl | newbits3, 40);
3839 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3840 tg3_power_down_phy(tp, do_low_power);
3842 tg3_frob_aux_power(tp, true);
3844 /* Workaround for unstable PLL clock */
3845 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3846 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3847 u32 val = tr32(0x7d00);
3849 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3851 if (!tg3_flag(tp, ENABLE_ASF)) {
3854 err = tg3_nvram_lock(tp);
3855 tg3_halt_cpu(tp, RX_CPU_BASE);
3857 tg3_nvram_unlock(tp);
3861 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3866 static void tg3_power_down(struct tg3 *tp)
3868 tg3_power_down_prepare(tp);
3870 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3871 pci_set_power_state(tp->pdev, PCI_D3hot);
3874 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3876 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3877 case MII_TG3_AUX_STAT_10HALF:
3879 *duplex = DUPLEX_HALF;
3882 case MII_TG3_AUX_STAT_10FULL:
3884 *duplex = DUPLEX_FULL;
3887 case MII_TG3_AUX_STAT_100HALF:
3889 *duplex = DUPLEX_HALF;
3892 case MII_TG3_AUX_STAT_100FULL:
3894 *duplex = DUPLEX_FULL;
3897 case MII_TG3_AUX_STAT_1000HALF:
3898 *speed = SPEED_1000;
3899 *duplex = DUPLEX_HALF;
3902 case MII_TG3_AUX_STAT_1000FULL:
3903 *speed = SPEED_1000;
3904 *duplex = DUPLEX_FULL;
3908 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3909 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3911 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3915 *speed = SPEED_UNKNOWN;
3916 *duplex = DUPLEX_UNKNOWN;
3921 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3926 new_adv = ADVERTISE_CSMA;
3927 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3928 new_adv |= mii_advertise_flowctrl(flowctrl);
3930 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3934 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3935 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3937 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3938 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3939 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3941 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3946 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3949 tw32(TG3_CPMU_EEE_MODE,
3950 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3952 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3957 /* Advertise 100-BaseTX EEE ability */
3958 if (advertise & ADVERTISED_100baseT_Full)
3959 val |= MDIO_AN_EEE_ADV_100TX;
3960 /* Advertise 1000-BaseT EEE ability */
3961 if (advertise & ADVERTISED_1000baseT_Full)
3962 val |= MDIO_AN_EEE_ADV_1000T;
3963 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3967 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3969 case ASIC_REV_57765:
3970 case ASIC_REV_57766:
3972 /* If we advertised any eee advertisements above... */
3974 val = MII_TG3_DSP_TAP26_ALNOKO |
3975 MII_TG3_DSP_TAP26_RMRXSTO |
3976 MII_TG3_DSP_TAP26_OPCSINPT;
3977 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3980 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3981 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3982 MII_TG3_DSP_CH34TP2_HIBW01);
3985 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3994 static void tg3_phy_copper_begin(struct tg3 *tp)
3996 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3997 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4000 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4001 adv = ADVERTISED_10baseT_Half |
4002 ADVERTISED_10baseT_Full;
4003 if (tg3_flag(tp, WOL_SPEED_100MB))
4004 adv |= ADVERTISED_100baseT_Half |
4005 ADVERTISED_100baseT_Full;
4007 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4009 adv = tp->link_config.advertising;
4010 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4011 adv &= ~(ADVERTISED_1000baseT_Half |
4012 ADVERTISED_1000baseT_Full);
4014 fc = tp->link_config.flowctrl;
4017 tg3_phy_autoneg_cfg(tp, adv, fc);
4019 tg3_writephy(tp, MII_BMCR,
4020 BMCR_ANENABLE | BMCR_ANRESTART);
4023 u32 bmcr, orig_bmcr;
4025 tp->link_config.active_speed = tp->link_config.speed;
4026 tp->link_config.active_duplex = tp->link_config.duplex;
4029 switch (tp->link_config.speed) {
4035 bmcr |= BMCR_SPEED100;
4039 bmcr |= BMCR_SPEED1000;
4043 if (tp->link_config.duplex == DUPLEX_FULL)
4044 bmcr |= BMCR_FULLDPLX;
4046 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4047 (bmcr != orig_bmcr)) {
4048 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4049 for (i = 0; i < 1500; i++) {
4053 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4054 tg3_readphy(tp, MII_BMSR, &tmp))
4056 if (!(tmp & BMSR_LSTATUS)) {
4061 tg3_writephy(tp, MII_BMCR, bmcr);
4067 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4071 /* Turn off tap power management. */
4072 /* Set Extended packet length bit */
4073 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4075 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4076 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4077 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4078 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4079 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4086 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4088 u32 advmsk, tgtadv, advertising;
4090 advertising = tp->link_config.advertising;
4091 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4093 advmsk = ADVERTISE_ALL;
4094 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4095 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4096 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4099 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4102 if ((*lcladv & advmsk) != tgtadv)
4105 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4108 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4110 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4114 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4115 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4116 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4117 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4118 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4120 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4123 if (tg3_ctrl != tgtadv)
4130 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4134 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4137 if (tg3_readphy(tp, MII_STAT1000, &val))
4140 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4143 if (tg3_readphy(tp, MII_LPA, rmtadv))
4146 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4147 tp->link_config.rmt_adv = lpeth;
4152 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4154 int current_link_up;
4156 u32 lcl_adv, rmt_adv;
4164 (MAC_STATUS_SYNC_CHANGED |
4165 MAC_STATUS_CFG_CHANGED |
4166 MAC_STATUS_MI_COMPLETION |
4167 MAC_STATUS_LNKSTATE_CHANGED));
4170 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4172 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4176 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4178 /* Some third-party PHYs need to be reset on link going
4181 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4182 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4183 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4184 netif_carrier_ok(tp->dev)) {
4185 tg3_readphy(tp, MII_BMSR, &bmsr);
4186 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4187 !(bmsr & BMSR_LSTATUS))
4193 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4194 tg3_readphy(tp, MII_BMSR, &bmsr);
4195 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4196 !tg3_flag(tp, INIT_COMPLETE))
4199 if (!(bmsr & BMSR_LSTATUS)) {
4200 err = tg3_init_5401phy_dsp(tp);
4204 tg3_readphy(tp, MII_BMSR, &bmsr);
4205 for (i = 0; i < 1000; i++) {
4207 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4208 (bmsr & BMSR_LSTATUS)) {
4214 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4215 TG3_PHY_REV_BCM5401_B0 &&
4216 !(bmsr & BMSR_LSTATUS) &&
4217 tp->link_config.active_speed == SPEED_1000) {
4218 err = tg3_phy_reset(tp);
4220 err = tg3_init_5401phy_dsp(tp);
4225 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4226 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4227 /* 5701 {A0,B0} CRC bug workaround */
4228 tg3_writephy(tp, 0x15, 0x0a75);
4229 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4230 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4231 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4234 /* Clear pending interrupts... */
4235 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4236 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4238 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4239 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4240 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4241 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4243 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4244 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4245 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4246 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4247 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4249 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4252 current_link_up = 0;
4253 current_speed = SPEED_UNKNOWN;
4254 current_duplex = DUPLEX_UNKNOWN;
4255 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4256 tp->link_config.rmt_adv = 0;
4258 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4259 err = tg3_phy_auxctl_read(tp,
4260 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4262 if (!err && !(val & (1 << 10))) {
4263 tg3_phy_auxctl_write(tp,
4264 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4271 for (i = 0; i < 100; i++) {
4272 tg3_readphy(tp, MII_BMSR, &bmsr);
4273 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4274 (bmsr & BMSR_LSTATUS))
4279 if (bmsr & BMSR_LSTATUS) {
4282 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4283 for (i = 0; i < 2000; i++) {
4285 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4290 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4295 for (i = 0; i < 200; i++) {
4296 tg3_readphy(tp, MII_BMCR, &bmcr);
4297 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4299 if (bmcr && bmcr != 0x7fff)
4307 tp->link_config.active_speed = current_speed;
4308 tp->link_config.active_duplex = current_duplex;
4310 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4311 if ((bmcr & BMCR_ANENABLE) &&
4312 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4313 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4314 current_link_up = 1;
4316 if (!(bmcr & BMCR_ANENABLE) &&
4317 tp->link_config.speed == current_speed &&
4318 tp->link_config.duplex == current_duplex &&
4319 tp->link_config.flowctrl ==
4320 tp->link_config.active_flowctrl) {
4321 current_link_up = 1;
4325 if (current_link_up == 1 &&
4326 tp->link_config.active_duplex == DUPLEX_FULL) {
4329 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4330 reg = MII_TG3_FET_GEN_STAT;
4331 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4333 reg = MII_TG3_EXT_STAT;
4334 bit = MII_TG3_EXT_STAT_MDIX;
4337 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4338 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4340 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4345 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4346 tg3_phy_copper_begin(tp);
4348 tg3_readphy(tp, MII_BMSR, &bmsr);
4349 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4350 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4351 current_link_up = 1;
4354 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4355 if (current_link_up == 1) {
4356 if (tp->link_config.active_speed == SPEED_100 ||
4357 tp->link_config.active_speed == SPEED_10)
4358 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4360 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4361 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4362 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4364 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4366 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4367 if (tp->link_config.active_duplex == DUPLEX_HALF)
4368 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4370 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4371 if (current_link_up == 1 &&
4372 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4373 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4375 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4378 /* ??? Without this setting Netgear GA302T PHY does not
4379 * ??? send/receive packets...
4381 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4382 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4383 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4384 tw32_f(MAC_MI_MODE, tp->mi_mode);
4388 tw32_f(MAC_MODE, tp->mac_mode);
4391 tg3_phy_eee_adjust(tp, current_link_up);
4393 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4394 /* Polled via timer. */
4395 tw32_f(MAC_EVENT, 0);
4397 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4401 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4402 current_link_up == 1 &&
4403 tp->link_config.active_speed == SPEED_1000 &&
4404 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4407 (MAC_STATUS_SYNC_CHANGED |
4408 MAC_STATUS_CFG_CHANGED));
4411 NIC_SRAM_FIRMWARE_MBOX,
4412 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4415 /* Prevent send BD corruption. */
4416 if (tg3_flag(tp, CLKREQ_BUG)) {
4417 u16 oldlnkctl, newlnkctl;
4419 pci_read_config_word(tp->pdev,
4420 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4422 if (tp->link_config.active_speed == SPEED_100 ||
4423 tp->link_config.active_speed == SPEED_10)
4424 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4426 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4427 if (newlnkctl != oldlnkctl)
4428 pci_write_config_word(tp->pdev,
4429 pci_pcie_cap(tp->pdev) +
4430 PCI_EXP_LNKCTL, newlnkctl);
4433 if (current_link_up != netif_carrier_ok(tp->dev)) {
4434 if (current_link_up)
4435 netif_carrier_on(tp->dev);
4437 netif_carrier_off(tp->dev);
4438 tg3_link_report(tp);
4444 struct tg3_fiber_aneginfo {
4446 #define ANEG_STATE_UNKNOWN 0
4447 #define ANEG_STATE_AN_ENABLE 1
4448 #define ANEG_STATE_RESTART_INIT 2
4449 #define ANEG_STATE_RESTART 3
4450 #define ANEG_STATE_DISABLE_LINK_OK 4
4451 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4452 #define ANEG_STATE_ABILITY_DETECT 6
4453 #define ANEG_STATE_ACK_DETECT_INIT 7
4454 #define ANEG_STATE_ACK_DETECT 8
4455 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4456 #define ANEG_STATE_COMPLETE_ACK 10
4457 #define ANEG_STATE_IDLE_DETECT_INIT 11
4458 #define ANEG_STATE_IDLE_DETECT 12
4459 #define ANEG_STATE_LINK_OK 13
4460 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4461 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4464 #define MR_AN_ENABLE 0x00000001
4465 #define MR_RESTART_AN 0x00000002
4466 #define MR_AN_COMPLETE 0x00000004
4467 #define MR_PAGE_RX 0x00000008
4468 #define MR_NP_LOADED 0x00000010
4469 #define MR_TOGGLE_TX 0x00000020
4470 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4471 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4472 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4473 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4474 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4475 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4476 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4477 #define MR_TOGGLE_RX 0x00002000
4478 #define MR_NP_RX 0x00004000
4480 #define MR_LINK_OK 0x80000000
4482 unsigned long link_time, cur_time;
4484 u32 ability_match_cfg;
4485 int ability_match_count;
4487 char ability_match, idle_match, ack_match;
4489 u32 txconfig, rxconfig;
4490 #define ANEG_CFG_NP 0x00000080
4491 #define ANEG_CFG_ACK 0x00000040
4492 #define ANEG_CFG_RF2 0x00000020
4493 #define ANEG_CFG_RF1 0x00000010
4494 #define ANEG_CFG_PS2 0x00000001
4495 #define ANEG_CFG_PS1 0x00008000
4496 #define ANEG_CFG_HD 0x00004000
4497 #define ANEG_CFG_FD 0x00002000
4498 #define ANEG_CFG_INVAL 0x00001f06
4503 #define ANEG_TIMER_ENAB 2
4504 #define ANEG_FAILED -1
4506 #define ANEG_STATE_SETTLE_TIME 10000
4508 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4509 struct tg3_fiber_aneginfo *ap)
4512 unsigned long delta;
4516 if (ap->state == ANEG_STATE_UNKNOWN) {
4520 ap->ability_match_cfg = 0;
4521 ap->ability_match_count = 0;
4522 ap->ability_match = 0;
4528 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4529 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4531 if (rx_cfg_reg != ap->ability_match_cfg) {
4532 ap->ability_match_cfg = rx_cfg_reg;
4533 ap->ability_match = 0;
4534 ap->ability_match_count = 0;
4536 if (++ap->ability_match_count > 1) {
4537 ap->ability_match = 1;
4538 ap->ability_match_cfg = rx_cfg_reg;
4541 if (rx_cfg_reg & ANEG_CFG_ACK)
4549 ap->ability_match_cfg = 0;
4550 ap->ability_match_count = 0;
4551 ap->ability_match = 0;
4557 ap->rxconfig = rx_cfg_reg;
4560 switch (ap->state) {
4561 case ANEG_STATE_UNKNOWN:
4562 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4563 ap->state = ANEG_STATE_AN_ENABLE;
4566 case ANEG_STATE_AN_ENABLE:
4567 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4568 if (ap->flags & MR_AN_ENABLE) {
4571 ap->ability_match_cfg = 0;
4572 ap->ability_match_count = 0;
4573 ap->ability_match = 0;
4577 ap->state = ANEG_STATE_RESTART_INIT;
4579 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4583 case ANEG_STATE_RESTART_INIT:
4584 ap->link_time = ap->cur_time;
4585 ap->flags &= ~(MR_NP_LOADED);
4587 tw32(MAC_TX_AUTO_NEG, 0);
4588 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4589 tw32_f(MAC_MODE, tp->mac_mode);
4592 ret = ANEG_TIMER_ENAB;
4593 ap->state = ANEG_STATE_RESTART;
4596 case ANEG_STATE_RESTART:
4597 delta = ap->cur_time - ap->link_time;
4598 if (delta > ANEG_STATE_SETTLE_TIME)
4599 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4601 ret = ANEG_TIMER_ENAB;
4604 case ANEG_STATE_DISABLE_LINK_OK:
4608 case ANEG_STATE_ABILITY_DETECT_INIT:
4609 ap->flags &= ~(MR_TOGGLE_TX);
4610 ap->txconfig = ANEG_CFG_FD;
4611 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4612 if (flowctrl & ADVERTISE_1000XPAUSE)
4613 ap->txconfig |= ANEG_CFG_PS1;
4614 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4615 ap->txconfig |= ANEG_CFG_PS2;
4616 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4617 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4618 tw32_f(MAC_MODE, tp->mac_mode);
4621 ap->state = ANEG_STATE_ABILITY_DETECT;
4624 case ANEG_STATE_ABILITY_DETECT:
4625 if (ap->ability_match != 0 && ap->rxconfig != 0)
4626 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4629 case ANEG_STATE_ACK_DETECT_INIT:
4630 ap->txconfig |= ANEG_CFG_ACK;
4631 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4632 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4633 tw32_f(MAC_MODE, tp->mac_mode);
4636 ap->state = ANEG_STATE_ACK_DETECT;
4639 case ANEG_STATE_ACK_DETECT:
4640 if (ap->ack_match != 0) {
4641 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4642 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4643 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4645 ap->state = ANEG_STATE_AN_ENABLE;
4647 } else if (ap->ability_match != 0 &&
4648 ap->rxconfig == 0) {
4649 ap->state = ANEG_STATE_AN_ENABLE;
4653 case ANEG_STATE_COMPLETE_ACK_INIT:
4654 if (ap->rxconfig & ANEG_CFG_INVAL) {
4658 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4659 MR_LP_ADV_HALF_DUPLEX |
4660 MR_LP_ADV_SYM_PAUSE |
4661 MR_LP_ADV_ASYM_PAUSE |
4662 MR_LP_ADV_REMOTE_FAULT1 |
4663 MR_LP_ADV_REMOTE_FAULT2 |
4664 MR_LP_ADV_NEXT_PAGE |
4667 if (ap->rxconfig & ANEG_CFG_FD)
4668 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4669 if (ap->rxconfig & ANEG_CFG_HD)
4670 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4671 if (ap->rxconfig & ANEG_CFG_PS1)
4672 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4673 if (ap->rxconfig & ANEG_CFG_PS2)
4674 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4675 if (ap->rxconfig & ANEG_CFG_RF1)
4676 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4677 if (ap->rxconfig & ANEG_CFG_RF2)
4678 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4679 if (ap->rxconfig & ANEG_CFG_NP)
4680 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4682 ap->link_time = ap->cur_time;
4684 ap->flags ^= (MR_TOGGLE_TX);
4685 if (ap->rxconfig & 0x0008)
4686 ap->flags |= MR_TOGGLE_RX;
4687 if (ap->rxconfig & ANEG_CFG_NP)
4688 ap->flags |= MR_NP_RX;
4689 ap->flags |= MR_PAGE_RX;
4691 ap->state = ANEG_STATE_COMPLETE_ACK;
4692 ret = ANEG_TIMER_ENAB;
4695 case ANEG_STATE_COMPLETE_ACK:
4696 if (ap->ability_match != 0 &&
4697 ap->rxconfig == 0) {
4698 ap->state = ANEG_STATE_AN_ENABLE;
4701 delta = ap->cur_time - ap->link_time;
4702 if (delta > ANEG_STATE_SETTLE_TIME) {
4703 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4704 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4706 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4707 !(ap->flags & MR_NP_RX)) {
4708 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4716 case ANEG_STATE_IDLE_DETECT_INIT:
4717 ap->link_time = ap->cur_time;
4718 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4719 tw32_f(MAC_MODE, tp->mac_mode);
4722 ap->state = ANEG_STATE_IDLE_DETECT;
4723 ret = ANEG_TIMER_ENAB;
4726 case ANEG_STATE_IDLE_DETECT:
4727 if (ap->ability_match != 0 &&
4728 ap->rxconfig == 0) {
4729 ap->state = ANEG_STATE_AN_ENABLE;
4732 delta = ap->cur_time - ap->link_time;
4733 if (delta > ANEG_STATE_SETTLE_TIME) {
4734 /* XXX another gem from the Broadcom driver :( */
4735 ap->state = ANEG_STATE_LINK_OK;
4739 case ANEG_STATE_LINK_OK:
4740 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4744 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4745 /* ??? unimplemented */
4748 case ANEG_STATE_NEXT_PAGE_WAIT:
4749 /* ??? unimplemented */
4760 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4763 struct tg3_fiber_aneginfo aninfo;
4764 int status = ANEG_FAILED;
4768 tw32_f(MAC_TX_AUTO_NEG, 0);
4770 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4771 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4774 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4777 memset(&aninfo, 0, sizeof(aninfo));
4778 aninfo.flags |= MR_AN_ENABLE;
4779 aninfo.state = ANEG_STATE_UNKNOWN;
4780 aninfo.cur_time = 0;
4782 while (++tick < 195000) {
4783 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4784 if (status == ANEG_DONE || status == ANEG_FAILED)
4790 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4791 tw32_f(MAC_MODE, tp->mac_mode);
4794 *txflags = aninfo.txconfig;
4795 *rxflags = aninfo.flags;
4797 if (status == ANEG_DONE &&
4798 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4799 MR_LP_ADV_FULL_DUPLEX)))
4805 static void tg3_init_bcm8002(struct tg3 *tp)
4807 u32 mac_status = tr32(MAC_STATUS);
4810 /* Reset when initting first time or we have a link. */
4811 if (tg3_flag(tp, INIT_COMPLETE) &&
4812 !(mac_status & MAC_STATUS_PCS_SYNCED))
4815 /* Set PLL lock range. */
4816 tg3_writephy(tp, 0x16, 0x8007);
4819 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4821 /* Wait for reset to complete. */
4822 /* XXX schedule_timeout() ... */
4823 for (i = 0; i < 500; i++)
4826 /* Config mode; select PMA/Ch 1 regs. */
4827 tg3_writephy(tp, 0x10, 0x8411);
4829 /* Enable auto-lock and comdet, select txclk for tx. */
4830 tg3_writephy(tp, 0x11, 0x0a10);
4832 tg3_writephy(tp, 0x18, 0x00a0);
4833 tg3_writephy(tp, 0x16, 0x41ff);
4835 /* Assert and deassert POR. */
4836 tg3_writephy(tp, 0x13, 0x0400);
4838 tg3_writephy(tp, 0x13, 0x0000);
4840 tg3_writephy(tp, 0x11, 0x0a50);
4842 tg3_writephy(tp, 0x11, 0x0a10);
4844 /* Wait for signal to stabilize */
4845 /* XXX schedule_timeout() ... */
4846 for (i = 0; i < 15000; i++)
4849 /* Deselect the channel register so we can read the PHYID
4852 tg3_writephy(tp, 0x10, 0x8011);
4855 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4858 u32 sg_dig_ctrl, sg_dig_status;
4859 u32 serdes_cfg, expected_sg_dig_ctrl;
4860 int workaround, port_a;
4861 int current_link_up;
4864 expected_sg_dig_ctrl = 0;
4867 current_link_up = 0;
4869 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4870 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4872 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4875 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4876 /* preserve bits 20-23 for voltage regulator */
4877 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4880 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4882 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4883 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4885 u32 val = serdes_cfg;
4891 tw32_f(MAC_SERDES_CFG, val);
4894 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4896 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4897 tg3_setup_flow_control(tp, 0, 0);
4898 current_link_up = 1;
4903 /* Want auto-negotiation. */
4904 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4906 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4907 if (flowctrl & ADVERTISE_1000XPAUSE)
4908 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4909 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4910 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4912 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4913 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4914 tp->serdes_counter &&
4915 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4916 MAC_STATUS_RCVD_CFG)) ==
4917 MAC_STATUS_PCS_SYNCED)) {
4918 tp->serdes_counter--;
4919 current_link_up = 1;
4924 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4925 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4927 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4929 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4930 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4931 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4932 MAC_STATUS_SIGNAL_DET)) {
4933 sg_dig_status = tr32(SG_DIG_STATUS);
4934 mac_status = tr32(MAC_STATUS);
4936 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4937 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4938 u32 local_adv = 0, remote_adv = 0;
4940 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4941 local_adv |= ADVERTISE_1000XPAUSE;
4942 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4943 local_adv |= ADVERTISE_1000XPSE_ASYM;
4945 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4946 remote_adv |= LPA_1000XPAUSE;
4947 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4948 remote_adv |= LPA_1000XPAUSE_ASYM;
4950 tp->link_config.rmt_adv =
4951 mii_adv_to_ethtool_adv_x(remote_adv);
4953 tg3_setup_flow_control(tp, local_adv, remote_adv);
4954 current_link_up = 1;
4955 tp->serdes_counter = 0;
4956 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4957 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4958 if (tp->serdes_counter)
4959 tp->serdes_counter--;
4962 u32 val = serdes_cfg;
4969 tw32_f(MAC_SERDES_CFG, val);
4972 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4975 /* Link parallel detection - link is up */
4976 /* only if we have PCS_SYNC and not */
4977 /* receiving config code words */
4978 mac_status = tr32(MAC_STATUS);
4979 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4980 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4981 tg3_setup_flow_control(tp, 0, 0);
4982 current_link_up = 1;
4984 TG3_PHYFLG_PARALLEL_DETECT;
4985 tp->serdes_counter =
4986 SERDES_PARALLEL_DET_TIMEOUT;
4988 goto restart_autoneg;
4992 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4993 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4997 return current_link_up;
5000 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5002 int current_link_up = 0;
5004 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5007 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5008 u32 txflags, rxflags;
5011 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5012 u32 local_adv = 0, remote_adv = 0;
5014 if (txflags & ANEG_CFG_PS1)
5015 local_adv |= ADVERTISE_1000XPAUSE;
5016 if (txflags & ANEG_CFG_PS2)
5017 local_adv |= ADVERTISE_1000XPSE_ASYM;
5019 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5020 remote_adv |= LPA_1000XPAUSE;
5021 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5022 remote_adv |= LPA_1000XPAUSE_ASYM;
5024 tp->link_config.rmt_adv =
5025 mii_adv_to_ethtool_adv_x(remote_adv);
5027 tg3_setup_flow_control(tp, local_adv, remote_adv);
5029 current_link_up = 1;
5031 for (i = 0; i < 30; i++) {
5034 (MAC_STATUS_SYNC_CHANGED |
5035 MAC_STATUS_CFG_CHANGED));
5037 if ((tr32(MAC_STATUS) &
5038 (MAC_STATUS_SYNC_CHANGED |
5039 MAC_STATUS_CFG_CHANGED)) == 0)
5043 mac_status = tr32(MAC_STATUS);
5044 if (current_link_up == 0 &&
5045 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5046 !(mac_status & MAC_STATUS_RCVD_CFG))
5047 current_link_up = 1;
5049 tg3_setup_flow_control(tp, 0, 0);
5051 /* Forcing 1000FD link up. */
5052 current_link_up = 1;
5054 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5057 tw32_f(MAC_MODE, tp->mac_mode);
5062 return current_link_up;
5065 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5068 u16 orig_active_speed;
5069 u8 orig_active_duplex;
5071 int current_link_up;
5074 orig_pause_cfg = tp->link_config.active_flowctrl;
5075 orig_active_speed = tp->link_config.active_speed;
5076 orig_active_duplex = tp->link_config.active_duplex;
5078 if (!tg3_flag(tp, HW_AUTONEG) &&
5079 netif_carrier_ok(tp->dev) &&
5080 tg3_flag(tp, INIT_COMPLETE)) {
5081 mac_status = tr32(MAC_STATUS);
5082 mac_status &= (MAC_STATUS_PCS_SYNCED |
5083 MAC_STATUS_SIGNAL_DET |
5084 MAC_STATUS_CFG_CHANGED |
5085 MAC_STATUS_RCVD_CFG);
5086 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5087 MAC_STATUS_SIGNAL_DET)) {
5088 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5089 MAC_STATUS_CFG_CHANGED));
5094 tw32_f(MAC_TX_AUTO_NEG, 0);
5096 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5097 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5098 tw32_f(MAC_MODE, tp->mac_mode);
5101 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5102 tg3_init_bcm8002(tp);
5104 /* Enable link change event even when serdes polling. */
5105 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5108 current_link_up = 0;
5109 tp->link_config.rmt_adv = 0;
5110 mac_status = tr32(MAC_STATUS);
5112 if (tg3_flag(tp, HW_AUTONEG))
5113 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5115 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5117 tp->napi[0].hw_status->status =
5118 (SD_STATUS_UPDATED |
5119 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5121 for (i = 0; i < 100; i++) {
5122 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5123 MAC_STATUS_CFG_CHANGED));
5125 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5126 MAC_STATUS_CFG_CHANGED |
5127 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5131 mac_status = tr32(MAC_STATUS);
5132 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5133 current_link_up = 0;
5134 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5135 tp->serdes_counter == 0) {
5136 tw32_f(MAC_MODE, (tp->mac_mode |
5137 MAC_MODE_SEND_CONFIGS));
5139 tw32_f(MAC_MODE, tp->mac_mode);
5143 if (current_link_up == 1) {
5144 tp->link_config.active_speed = SPEED_1000;
5145 tp->link_config.active_duplex = DUPLEX_FULL;
5146 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5147 LED_CTRL_LNKLED_OVERRIDE |
5148 LED_CTRL_1000MBPS_ON));
5150 tp->link_config.active_speed = SPEED_UNKNOWN;
5151 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5152 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5153 LED_CTRL_LNKLED_OVERRIDE |
5154 LED_CTRL_TRAFFIC_OVERRIDE));
5157 if (current_link_up != netif_carrier_ok(tp->dev)) {
5158 if (current_link_up)
5159 netif_carrier_on(tp->dev);
5161 netif_carrier_off(tp->dev);
5162 tg3_link_report(tp);
5164 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5165 if (orig_pause_cfg != now_pause_cfg ||
5166 orig_active_speed != tp->link_config.active_speed ||
5167 orig_active_duplex != tp->link_config.active_duplex)
5168 tg3_link_report(tp);
5174 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5176 int current_link_up, err = 0;
5180 u32 local_adv, remote_adv;
5182 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5183 tw32_f(MAC_MODE, tp->mac_mode);
5189 (MAC_STATUS_SYNC_CHANGED |
5190 MAC_STATUS_CFG_CHANGED |
5191 MAC_STATUS_MI_COMPLETION |
5192 MAC_STATUS_LNKSTATE_CHANGED));
5198 current_link_up = 0;
5199 current_speed = SPEED_UNKNOWN;
5200 current_duplex = DUPLEX_UNKNOWN;
5201 tp->link_config.rmt_adv = 0;
5203 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5204 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5205 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5206 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5207 bmsr |= BMSR_LSTATUS;
5209 bmsr &= ~BMSR_LSTATUS;
5212 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5214 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5215 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5216 /* do nothing, just check for link up at the end */
5217 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5220 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5221 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5222 ADVERTISE_1000XPAUSE |
5223 ADVERTISE_1000XPSE_ASYM |
5226 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5227 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5229 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5230 tg3_writephy(tp, MII_ADVERTISE, newadv);
5231 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5232 tg3_writephy(tp, MII_BMCR, bmcr);
5234 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5235 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5236 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5243 bmcr &= ~BMCR_SPEED1000;
5244 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5246 if (tp->link_config.duplex == DUPLEX_FULL)
5247 new_bmcr |= BMCR_FULLDPLX;
5249 if (new_bmcr != bmcr) {
5250 /* BMCR_SPEED1000 is a reserved bit that needs
5251 * to be set on write.
5253 new_bmcr |= BMCR_SPEED1000;
5255 /* Force a linkdown */
5256 if (netif_carrier_ok(tp->dev)) {
5259 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5260 adv &= ~(ADVERTISE_1000XFULL |
5261 ADVERTISE_1000XHALF |
5263 tg3_writephy(tp, MII_ADVERTISE, adv);
5264 tg3_writephy(tp, MII_BMCR, bmcr |
5268 netif_carrier_off(tp->dev);
5270 tg3_writephy(tp, MII_BMCR, new_bmcr);
5272 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5273 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5274 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5276 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5277 bmsr |= BMSR_LSTATUS;
5279 bmsr &= ~BMSR_LSTATUS;
5281 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5285 if (bmsr & BMSR_LSTATUS) {
5286 current_speed = SPEED_1000;
5287 current_link_up = 1;
5288 if (bmcr & BMCR_FULLDPLX)
5289 current_duplex = DUPLEX_FULL;
5291 current_duplex = DUPLEX_HALF;
5296 if (bmcr & BMCR_ANENABLE) {
5299 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5300 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5301 common = local_adv & remote_adv;
5302 if (common & (ADVERTISE_1000XHALF |
5303 ADVERTISE_1000XFULL)) {
5304 if (common & ADVERTISE_1000XFULL)
5305 current_duplex = DUPLEX_FULL;
5307 current_duplex = DUPLEX_HALF;
5309 tp->link_config.rmt_adv =
5310 mii_adv_to_ethtool_adv_x(remote_adv);
5311 } else if (!tg3_flag(tp, 5780_CLASS)) {
5312 /* Link is up via parallel detect */
5314 current_link_up = 0;
5319 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5320 tg3_setup_flow_control(tp, local_adv, remote_adv);
5322 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5323 if (tp->link_config.active_duplex == DUPLEX_HALF)
5324 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5326 tw32_f(MAC_MODE, tp->mac_mode);
5329 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5331 tp->link_config.active_speed = current_speed;
5332 tp->link_config.active_duplex = current_duplex;
5334 if (current_link_up != netif_carrier_ok(tp->dev)) {
5335 if (current_link_up)
5336 netif_carrier_on(tp->dev);
5338 netif_carrier_off(tp->dev);
5339 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5341 tg3_link_report(tp);
5346 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5348 if (tp->serdes_counter) {
5349 /* Give autoneg time to complete. */
5350 tp->serdes_counter--;
5354 if (!netif_carrier_ok(tp->dev) &&
5355 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5358 tg3_readphy(tp, MII_BMCR, &bmcr);
5359 if (bmcr & BMCR_ANENABLE) {
5362 /* Select shadow register 0x1f */
5363 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5364 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5366 /* Select expansion interrupt status register */
5367 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5368 MII_TG3_DSP_EXP1_INT_STAT);
5369 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5370 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5372 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5373 /* We have signal detect and not receiving
5374 * config code words, link is up by parallel
5378 bmcr &= ~BMCR_ANENABLE;
5379 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5380 tg3_writephy(tp, MII_BMCR, bmcr);
5381 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5384 } else if (netif_carrier_ok(tp->dev) &&
5385 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5386 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5389 /* Select expansion interrupt status register */
5390 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5391 MII_TG3_DSP_EXP1_INT_STAT);
5392 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5396 /* Config code words received, turn on autoneg. */
5397 tg3_readphy(tp, MII_BMCR, &bmcr);
5398 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5400 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5406 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5411 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5412 err = tg3_setup_fiber_phy(tp, force_reset);
5413 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5414 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5416 err = tg3_setup_copper_phy(tp, force_reset);
5418 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5421 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5422 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5424 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5429 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5430 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5431 tw32(GRC_MISC_CFG, val);
5434 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5435 (6 << TX_LENGTHS_IPG_SHIFT);
5436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5437 val |= tr32(MAC_TX_LENGTHS) &
5438 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5439 TX_LENGTHS_CNT_DWN_VAL_MSK);
5441 if (tp->link_config.active_speed == SPEED_1000 &&
5442 tp->link_config.active_duplex == DUPLEX_HALF)
5443 tw32(MAC_TX_LENGTHS, val |
5444 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5446 tw32(MAC_TX_LENGTHS, val |
5447 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5449 if (!tg3_flag(tp, 5705_PLUS)) {
5450 if (netif_carrier_ok(tp->dev)) {
5451 tw32(HOSTCC_STAT_COAL_TICKS,
5452 tp->coal.stats_block_coalesce_usecs);
5454 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5458 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5459 val = tr32(PCIE_PWR_MGMT_THRESH);
5460 if (!netif_carrier_ok(tp->dev))
5461 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5464 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5465 tw32(PCIE_PWR_MGMT_THRESH, val);
5471 static inline int tg3_irq_sync(struct tg3 *tp)
5473 return tp->irq_sync;
5476 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5480 dst = (u32 *)((u8 *)dst + off);
5481 for (i = 0; i < len; i += sizeof(u32))
5482 *dst++ = tr32(off + i);
5485 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5487 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5488 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5489 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5490 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5491 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5492 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5493 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5494 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5495 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5496 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5497 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5498 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5499 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5500 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5501 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5502 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5503 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5504 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5505 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5507 if (tg3_flag(tp, SUPPORT_MSIX))
5508 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5510 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5511 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5512 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5513 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5514 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5515 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5516 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5517 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5519 if (!tg3_flag(tp, 5705_PLUS)) {
5520 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5521 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5522 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5525 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5526 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5527 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5528 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5529 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5531 if (tg3_flag(tp, NVRAM))
5532 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5535 static void tg3_dump_state(struct tg3 *tp)
5540 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5542 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5546 if (tg3_flag(tp, PCI_EXPRESS)) {
5547 /* Read up to but not including private PCI registers */
5548 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5549 regs[i / sizeof(u32)] = tr32(i);
5551 tg3_dump_legacy_regs(tp, regs);
5553 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5554 if (!regs[i + 0] && !regs[i + 1] &&
5555 !regs[i + 2] && !regs[i + 3])
5558 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5560 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5565 for (i = 0; i < tp->irq_cnt; i++) {
5566 struct tg3_napi *tnapi = &tp->napi[i];
5568 /* SW status block */
5570 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5572 tnapi->hw_status->status,
5573 tnapi->hw_status->status_tag,
5574 tnapi->hw_status->rx_jumbo_consumer,
5575 tnapi->hw_status->rx_consumer,
5576 tnapi->hw_status->rx_mini_consumer,
5577 tnapi->hw_status->idx[0].rx_producer,
5578 tnapi->hw_status->idx[0].tx_consumer);
5581 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5583 tnapi->last_tag, tnapi->last_irq_tag,
5584 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5586 tnapi->prodring.rx_std_prod_idx,
5587 tnapi->prodring.rx_std_cons_idx,
5588 tnapi->prodring.rx_jmb_prod_idx,
5589 tnapi->prodring.rx_jmb_cons_idx);
5593 /* This is called whenever we suspect that the system chipset is re-
5594 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5595 * is bogus tx completions. We try to recover by setting the
5596 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5599 static void tg3_tx_recover(struct tg3 *tp)
5601 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5602 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5604 netdev_warn(tp->dev,
5605 "The system may be re-ordering memory-mapped I/O "
5606 "cycles to the network device, attempting to recover. "
5607 "Please report the problem to the driver maintainer "
5608 "and include system chipset information.\n");
5610 spin_lock(&tp->lock);
5611 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5612 spin_unlock(&tp->lock);
5615 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5617 /* Tell compiler to fetch tx indices from memory. */
5619 return tnapi->tx_pending -
5620 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5623 /* Tigon3 never reports partial packet sends. So we do not
5624 * need special logic to handle SKBs that have not had all
5625 * of their frags sent yet, like SunGEM does.
5627 static void tg3_tx(struct tg3_napi *tnapi)
5629 struct tg3 *tp = tnapi->tp;
5630 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5631 u32 sw_idx = tnapi->tx_cons;
5632 struct netdev_queue *txq;
5633 int index = tnapi - tp->napi;
5634 unsigned int pkts_compl = 0, bytes_compl = 0;
5636 if (tg3_flag(tp, ENABLE_TSS))
5639 txq = netdev_get_tx_queue(tp->dev, index);
5641 while (sw_idx != hw_idx) {
5642 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5643 struct sk_buff *skb = ri->skb;
5646 if (unlikely(skb == NULL)) {
5651 pci_unmap_single(tp->pdev,
5652 dma_unmap_addr(ri, mapping),
5658 while (ri->fragmented) {
5659 ri->fragmented = false;
5660 sw_idx = NEXT_TX(sw_idx);
5661 ri = &tnapi->tx_buffers[sw_idx];
5664 sw_idx = NEXT_TX(sw_idx);
5666 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5667 ri = &tnapi->tx_buffers[sw_idx];
5668 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5671 pci_unmap_page(tp->pdev,
5672 dma_unmap_addr(ri, mapping),
5673 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5676 while (ri->fragmented) {
5677 ri->fragmented = false;
5678 sw_idx = NEXT_TX(sw_idx);
5679 ri = &tnapi->tx_buffers[sw_idx];
5682 sw_idx = NEXT_TX(sw_idx);
5686 bytes_compl += skb->len;
5690 if (unlikely(tx_bug)) {
5696 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5698 tnapi->tx_cons = sw_idx;
5700 /* Need to make the tx_cons update visible to tg3_start_xmit()
5701 * before checking for netif_queue_stopped(). Without the
5702 * memory barrier, there is a small possibility that tg3_start_xmit()
5703 * will miss it and cause the queue to be stopped forever.
5707 if (unlikely(netif_tx_queue_stopped(txq) &&
5708 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5709 __netif_tx_lock(txq, smp_processor_id());
5710 if (netif_tx_queue_stopped(txq) &&
5711 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5712 netif_tx_wake_queue(txq);
5713 __netif_tx_unlock(txq);
5717 static void tg3_frag_free(bool is_frag, void *data)
5720 put_page(virt_to_head_page(data));
5725 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5727 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5728 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5733 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5734 map_sz, PCI_DMA_FROMDEVICE);
5735 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5740 /* Returns size of skb allocated or < 0 on error.
5742 * We only need to fill in the address because the other members
5743 * of the RX descriptor are invariant, see tg3_init_rings.
5745 * Note the purposeful assymetry of cpu vs. chip accesses. For
5746 * posting buffers we only dirty the first cache line of the RX
5747 * descriptor (containing the address). Whereas for the RX status
5748 * buffers the cpu only reads the last cacheline of the RX descriptor
5749 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5751 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5752 u32 opaque_key, u32 dest_idx_unmasked,
5753 unsigned int *frag_size)
5755 struct tg3_rx_buffer_desc *desc;
5756 struct ring_info *map;
5759 int skb_size, data_size, dest_idx;
5761 switch (opaque_key) {
5762 case RXD_OPAQUE_RING_STD:
5763 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5764 desc = &tpr->rx_std[dest_idx];
5765 map = &tpr->rx_std_buffers[dest_idx];
5766 data_size = tp->rx_pkt_map_sz;
5769 case RXD_OPAQUE_RING_JUMBO:
5770 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5771 desc = &tpr->rx_jmb[dest_idx].std;
5772 map = &tpr->rx_jmb_buffers[dest_idx];
5773 data_size = TG3_RX_JMB_MAP_SZ;
5780 /* Do not overwrite any of the map or rp information
5781 * until we are sure we can commit to a new buffer.
5783 * Callers depend upon this behavior and assume that
5784 * we leave everything unchanged if we fail.
5786 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5787 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5788 if (skb_size <= PAGE_SIZE) {
5789 data = netdev_alloc_frag(skb_size);
5790 *frag_size = skb_size;
5792 data = kmalloc(skb_size, GFP_ATOMIC);
5798 mapping = pci_map_single(tp->pdev,
5799 data + TG3_RX_OFFSET(tp),
5801 PCI_DMA_FROMDEVICE);
5802 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5803 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5808 dma_unmap_addr_set(map, mapping, mapping);
5810 desc->addr_hi = ((u64)mapping >> 32);
5811 desc->addr_lo = ((u64)mapping & 0xffffffff);
5816 /* We only need to move over in the address because the other
5817 * members of the RX descriptor are invariant. See notes above
5818 * tg3_alloc_rx_data for full details.
5820 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5821 struct tg3_rx_prodring_set *dpr,
5822 u32 opaque_key, int src_idx,
5823 u32 dest_idx_unmasked)
5825 struct tg3 *tp = tnapi->tp;
5826 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5827 struct ring_info *src_map, *dest_map;
5828 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5831 switch (opaque_key) {
5832 case RXD_OPAQUE_RING_STD:
5833 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5834 dest_desc = &dpr->rx_std[dest_idx];
5835 dest_map = &dpr->rx_std_buffers[dest_idx];
5836 src_desc = &spr->rx_std[src_idx];
5837 src_map = &spr->rx_std_buffers[src_idx];
5840 case RXD_OPAQUE_RING_JUMBO:
5841 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5842 dest_desc = &dpr->rx_jmb[dest_idx].std;
5843 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5844 src_desc = &spr->rx_jmb[src_idx].std;
5845 src_map = &spr->rx_jmb_buffers[src_idx];
5852 dest_map->data = src_map->data;
5853 dma_unmap_addr_set(dest_map, mapping,
5854 dma_unmap_addr(src_map, mapping));
5855 dest_desc->addr_hi = src_desc->addr_hi;
5856 dest_desc->addr_lo = src_desc->addr_lo;
5858 /* Ensure that the update to the skb happens after the physical
5859 * addresses have been transferred to the new BD location.
5863 src_map->data = NULL;
5866 /* The RX ring scheme is composed of multiple rings which post fresh
5867 * buffers to the chip, and one special ring the chip uses to report
5868 * status back to the host.
5870 * The special ring reports the status of received packets to the
5871 * host. The chip does not write into the original descriptor the
5872 * RX buffer was obtained from. The chip simply takes the original
5873 * descriptor as provided by the host, updates the status and length
5874 * field, then writes this into the next status ring entry.
5876 * Each ring the host uses to post buffers to the chip is described
5877 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
5878 * it is first placed into the on-chip ram. When the packet's length
5879 * is known, it walks down the TG3_BDINFO entries to select the ring.
5880 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5881 * which is within the range of the new packet's length is chosen.
5883 * The "separate ring for rx status" scheme may sound queer, but it makes
5884 * sense from a cache coherency perspective. If only the host writes
5885 * to the buffer post rings, and only the chip writes to the rx status
5886 * rings, then cache lines never move beyond shared-modified state.
5887 * If both the host and chip were to write into the same ring, cache line
5888 * eviction could occur since both entities want it in an exclusive state.
5890 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5892 struct tg3 *tp = tnapi->tp;
5893 u32 work_mask, rx_std_posted = 0;
5894 u32 std_prod_idx, jmb_prod_idx;
5895 u32 sw_idx = tnapi->rx_rcb_ptr;
5898 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5900 hw_idx = *(tnapi->rx_rcb_prod_idx);
5902 * We need to order the read of hw_idx and the read of
5903 * the opaque cookie.
5908 std_prod_idx = tpr->rx_std_prod_idx;
5909 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5910 while (sw_idx != hw_idx && budget > 0) {
5911 struct ring_info *ri;
5912 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5914 struct sk_buff *skb;
5915 dma_addr_t dma_addr;
5916 u32 opaque_key, desc_idx, *post_ptr;
5919 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5920 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5921 if (opaque_key == RXD_OPAQUE_RING_STD) {
5922 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5923 dma_addr = dma_unmap_addr(ri, mapping);
5925 post_ptr = &std_prod_idx;
5927 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5928 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5929 dma_addr = dma_unmap_addr(ri, mapping);
5931 post_ptr = &jmb_prod_idx;
5933 goto next_pkt_nopost;
5935 work_mask |= opaque_key;
5937 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5938 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5940 tg3_recycle_rx(tnapi, tpr, opaque_key,
5941 desc_idx, *post_ptr);
5943 /* Other statistics kept track of by card. */
5948 prefetch(data + TG3_RX_OFFSET(tp));
5949 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5952 if (len > TG3_RX_COPY_THRESH(tp)) {
5954 unsigned int frag_size;
5956 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5957 *post_ptr, &frag_size);
5961 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5962 PCI_DMA_FROMDEVICE);
5964 skb = build_skb(data, frag_size);
5966 tg3_frag_free(frag_size != 0, data);
5967 goto drop_it_no_recycle;
5969 skb_reserve(skb, TG3_RX_OFFSET(tp));
5970 /* Ensure that the update to the data happens
5971 * after the usage of the old DMA mapping.
5978 tg3_recycle_rx(tnapi, tpr, opaque_key,
5979 desc_idx, *post_ptr);
5981 skb = netdev_alloc_skb(tp->dev,
5982 len + TG3_RAW_IP_ALIGN);
5984 goto drop_it_no_recycle;
5986 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5987 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5989 data + TG3_RX_OFFSET(tp),
5991 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5995 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5996 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5997 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5998 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5999 skb->ip_summed = CHECKSUM_UNNECESSARY;
6001 skb_checksum_none_assert(skb);
6003 skb->protocol = eth_type_trans(skb, tp->dev);
6005 if (len > (tp->dev->mtu + ETH_HLEN) &&
6006 skb->protocol != htons(ETH_P_8021Q)) {
6008 goto drop_it_no_recycle;
6011 if (desc->type_flags & RXD_FLAG_VLAN &&
6012 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6013 __vlan_hwaccel_put_tag(skb,
6014 desc->err_vlan & RXD_VLAN_MASK);
6016 napi_gro_receive(&tnapi->napi, skb);
6024 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6025 tpr->rx_std_prod_idx = std_prod_idx &
6026 tp->rx_std_ring_mask;
6027 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6028 tpr->rx_std_prod_idx);
6029 work_mask &= ~RXD_OPAQUE_RING_STD;
6034 sw_idx &= tp->rx_ret_ring_mask;
6036 /* Refresh hw_idx to see if there is new work */
6037 if (sw_idx == hw_idx) {
6038 hw_idx = *(tnapi->rx_rcb_prod_idx);
6043 /* ACK the status ring. */
6044 tnapi->rx_rcb_ptr = sw_idx;
6045 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6047 /* Refill RX ring(s). */
6048 if (!tg3_flag(tp, ENABLE_RSS)) {
6049 /* Sync BD data before updating mailbox */
6052 if (work_mask & RXD_OPAQUE_RING_STD) {
6053 tpr->rx_std_prod_idx = std_prod_idx &
6054 tp->rx_std_ring_mask;
6055 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6056 tpr->rx_std_prod_idx);
6058 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6059 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6060 tp->rx_jmb_ring_mask;
6061 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6062 tpr->rx_jmb_prod_idx);
6065 } else if (work_mask) {
6066 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6067 * updated before the producer indices can be updated.
6071 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6072 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6074 if (tnapi != &tp->napi[1]) {
6075 tp->rx_refill = true;
6076 napi_schedule(&tp->napi[1].napi);
6083 static void tg3_poll_link(struct tg3 *tp)
6085 /* handle link change and other phy events */
6086 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6087 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6089 if (sblk->status & SD_STATUS_LINK_CHG) {
6090 sblk->status = SD_STATUS_UPDATED |
6091 (sblk->status & ~SD_STATUS_LINK_CHG);
6092 spin_lock(&tp->lock);
6093 if (tg3_flag(tp, USE_PHYLIB)) {
6095 (MAC_STATUS_SYNC_CHANGED |
6096 MAC_STATUS_CFG_CHANGED |
6097 MAC_STATUS_MI_COMPLETION |
6098 MAC_STATUS_LNKSTATE_CHANGED));
6101 tg3_setup_phy(tp, 0);
6102 spin_unlock(&tp->lock);
6107 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6108 struct tg3_rx_prodring_set *dpr,
6109 struct tg3_rx_prodring_set *spr)
6111 u32 si, di, cpycnt, src_prod_idx;
6115 src_prod_idx = spr->rx_std_prod_idx;
6117 /* Make sure updates to the rx_std_buffers[] entries and the
6118 * standard producer index are seen in the correct order.
6122 if (spr->rx_std_cons_idx == src_prod_idx)
6125 if (spr->rx_std_cons_idx < src_prod_idx)
6126 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6128 cpycnt = tp->rx_std_ring_mask + 1 -
6129 spr->rx_std_cons_idx;
6131 cpycnt = min(cpycnt,
6132 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6134 si = spr->rx_std_cons_idx;
6135 di = dpr->rx_std_prod_idx;
6137 for (i = di; i < di + cpycnt; i++) {
6138 if (dpr->rx_std_buffers[i].data) {
6148 /* Ensure that updates to the rx_std_buffers ring and the
6149 * shadowed hardware producer ring from tg3_recycle_skb() are
6150 * ordered correctly WRT the skb check above.
6154 memcpy(&dpr->rx_std_buffers[di],
6155 &spr->rx_std_buffers[si],
6156 cpycnt * sizeof(struct ring_info));
6158 for (i = 0; i < cpycnt; i++, di++, si++) {
6159 struct tg3_rx_buffer_desc *sbd, *dbd;
6160 sbd = &spr->rx_std[si];
6161 dbd = &dpr->rx_std[di];
6162 dbd->addr_hi = sbd->addr_hi;
6163 dbd->addr_lo = sbd->addr_lo;
6166 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6167 tp->rx_std_ring_mask;
6168 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6169 tp->rx_std_ring_mask;
6173 src_prod_idx = spr->rx_jmb_prod_idx;
6175 /* Make sure updates to the rx_jmb_buffers[] entries and
6176 * the jumbo producer index are seen in the correct order.
6180 if (spr->rx_jmb_cons_idx == src_prod_idx)
6183 if (spr->rx_jmb_cons_idx < src_prod_idx)
6184 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6186 cpycnt = tp->rx_jmb_ring_mask + 1 -
6187 spr->rx_jmb_cons_idx;
6189 cpycnt = min(cpycnt,
6190 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6192 si = spr->rx_jmb_cons_idx;
6193 di = dpr->rx_jmb_prod_idx;
6195 for (i = di; i < di + cpycnt; i++) {
6196 if (dpr->rx_jmb_buffers[i].data) {
6206 /* Ensure that updates to the rx_jmb_buffers ring and the
6207 * shadowed hardware producer ring from tg3_recycle_skb() are
6208 * ordered correctly WRT the skb check above.
6212 memcpy(&dpr->rx_jmb_buffers[di],
6213 &spr->rx_jmb_buffers[si],
6214 cpycnt * sizeof(struct ring_info));
6216 for (i = 0; i < cpycnt; i++, di++, si++) {
6217 struct tg3_rx_buffer_desc *sbd, *dbd;
6218 sbd = &spr->rx_jmb[si].std;
6219 dbd = &dpr->rx_jmb[di].std;
6220 dbd->addr_hi = sbd->addr_hi;
6221 dbd->addr_lo = sbd->addr_lo;
6224 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6225 tp->rx_jmb_ring_mask;
6226 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6227 tp->rx_jmb_ring_mask;
6233 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6235 struct tg3 *tp = tnapi->tp;
6237 /* run TX completion thread */
6238 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6240 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6244 if (!tnapi->rx_rcb_prod_idx)
6247 /* run RX thread, within the bounds set by NAPI.
6248 * All RX "locking" is done by ensuring outside
6249 * code synchronizes with tg3->napi.poll()
6251 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6252 work_done += tg3_rx(tnapi, budget - work_done);
6254 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6255 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6257 u32 std_prod_idx = dpr->rx_std_prod_idx;
6258 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6260 tp->rx_refill = false;
6261 for (i = 1; i < tp->irq_cnt; i++)
6262 err |= tg3_rx_prodring_xfer(tp, dpr,
6263 &tp->napi[i].prodring);
6267 if (std_prod_idx != dpr->rx_std_prod_idx)
6268 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6269 dpr->rx_std_prod_idx);
6271 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6272 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6273 dpr->rx_jmb_prod_idx);
6278 tw32_f(HOSTCC_MODE, tp->coal_now);
6284 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6286 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6287 schedule_work(&tp->reset_task);
6290 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6292 cancel_work_sync(&tp->reset_task);
6293 tg3_flag_clear(tp, RESET_TASK_PENDING);
6294 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6297 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6299 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6300 struct tg3 *tp = tnapi->tp;
6302 struct tg3_hw_status *sblk = tnapi->hw_status;
6305 work_done = tg3_poll_work(tnapi, work_done, budget);
6307 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6310 if (unlikely(work_done >= budget))
6313 /* tp->last_tag is used in tg3_int_reenable() below
6314 * to tell the hw how much work has been processed,
6315 * so we must read it before checking for more work.
6317 tnapi->last_tag = sblk->status_tag;
6318 tnapi->last_irq_tag = tnapi->last_tag;
6321 /* check for RX/TX work to do */
6322 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6323 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6325 /* This test here is not race free, but will reduce
6326 * the number of interrupts by looping again.
6328 if (tnapi == &tp->napi[1] && tp->rx_refill)
6331 napi_complete(napi);
6332 /* Reenable interrupts. */
6333 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6335 /* This test here is synchronized by napi_schedule()
6336 * and napi_complete() to close the race condition.
6338 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6339 tw32(HOSTCC_MODE, tp->coalesce_mode |
6340 HOSTCC_MODE_ENABLE |
6351 /* work_done is guaranteed to be less than budget. */
6352 napi_complete(napi);
6353 tg3_reset_task_schedule(tp);
6357 static void tg3_process_error(struct tg3 *tp)
6360 bool real_error = false;
6362 if (tg3_flag(tp, ERROR_PROCESSED))
6365 /* Check Flow Attention register */
6366 val = tr32(HOSTCC_FLOW_ATTN);
6367 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6368 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6372 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6373 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6377 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6378 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6387 tg3_flag_set(tp, ERROR_PROCESSED);
6388 tg3_reset_task_schedule(tp);
6391 static int tg3_poll(struct napi_struct *napi, int budget)
6393 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6394 struct tg3 *tp = tnapi->tp;
6396 struct tg3_hw_status *sblk = tnapi->hw_status;
6399 if (sblk->status & SD_STATUS_ERROR)
6400 tg3_process_error(tp);
6404 work_done = tg3_poll_work(tnapi, work_done, budget);
6406 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6409 if (unlikely(work_done >= budget))
6412 if (tg3_flag(tp, TAGGED_STATUS)) {
6413 /* tp->last_tag is used in tg3_int_reenable() below
6414 * to tell the hw how much work has been processed,
6415 * so we must read it before checking for more work.
6417 tnapi->last_tag = sblk->status_tag;
6418 tnapi->last_irq_tag = tnapi->last_tag;
6421 sblk->status &= ~SD_STATUS_UPDATED;
6423 if (likely(!tg3_has_work(tnapi))) {
6424 napi_complete(napi);
6425 tg3_int_reenable(tnapi);
6433 /* work_done is guaranteed to be less than budget. */
6434 napi_complete(napi);
6435 tg3_reset_task_schedule(tp);
6439 static void tg3_napi_disable(struct tg3 *tp)
6443 for (i = tp->irq_cnt - 1; i >= 0; i--)
6444 napi_disable(&tp->napi[i].napi);
6447 static void tg3_napi_enable(struct tg3 *tp)
6451 for (i = 0; i < tp->irq_cnt; i++)
6452 napi_enable(&tp->napi[i].napi);
6455 static void tg3_napi_init(struct tg3 *tp)
6459 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6460 for (i = 1; i < tp->irq_cnt; i++)
6461 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6464 static void tg3_napi_fini(struct tg3 *tp)
6468 for (i = 0; i < tp->irq_cnt; i++)
6469 netif_napi_del(&tp->napi[i].napi);
6472 static inline void tg3_netif_stop(struct tg3 *tp)
6474 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6475 tg3_napi_disable(tp);
6476 netif_tx_disable(tp->dev);
6479 static inline void tg3_netif_start(struct tg3 *tp)
6481 /* NOTE: unconditional netif_tx_wake_all_queues is only
6482 * appropriate so long as all callers are assured to
6483 * have free tx slots (such as after tg3_init_hw)
6485 netif_tx_wake_all_queues(tp->dev);
6487 tg3_napi_enable(tp);
6488 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6489 tg3_enable_ints(tp);
6492 static void tg3_irq_quiesce(struct tg3 *tp)
6496 BUG_ON(tp->irq_sync);
6501 for (i = 0; i < tp->irq_cnt; i++)
6502 synchronize_irq(tp->napi[i].irq_vec);
6505 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6506 * If irq_sync is non-zero, then the IRQ handler must be synchronized
6507 * with as well. Most of the time, this is not necessary except when
6508 * shutting down the device.
6510 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6512 spin_lock_bh(&tp->lock);
6514 tg3_irq_quiesce(tp);
6517 static inline void tg3_full_unlock(struct tg3 *tp)
6519 spin_unlock_bh(&tp->lock);
6522 /* One-shot MSI handler - Chip automatically disables interrupt
6523 * after sending MSI so driver doesn't have to do it.
6525 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6527 struct tg3_napi *tnapi = dev_id;
6528 struct tg3 *tp = tnapi->tp;
6530 prefetch(tnapi->hw_status);
6532 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6534 if (likely(!tg3_irq_sync(tp)))
6535 napi_schedule(&tnapi->napi);
6540 /* MSI ISR - No need to check for interrupt sharing and no need to
6541 * flush status block and interrupt mailbox. PCI ordering rules
6542 * guarantee that MSI will arrive after the status block.
6544 static irqreturn_t tg3_msi(int irq, void *dev_id)
6546 struct tg3_napi *tnapi = dev_id;
6547 struct tg3 *tp = tnapi->tp;
6549 prefetch(tnapi->hw_status);
6551 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6553 * Writing any value to intr-mbox-0 clears PCI INTA# and
6554 * chip-internal interrupt pending events.
6555 * Writing non-zero to intr-mbox-0 additional tells the
6556 * NIC to stop sending us irqs, engaging "in-intr-handler"
6559 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6560 if (likely(!tg3_irq_sync(tp)))
6561 napi_schedule(&tnapi->napi);
6563 return IRQ_RETVAL(1);
6566 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6568 struct tg3_napi *tnapi = dev_id;
6569 struct tg3 *tp = tnapi->tp;
6570 struct tg3_hw_status *sblk = tnapi->hw_status;
6571 unsigned int handled = 1;
6573 /* In INTx mode, it is possible for the interrupt to arrive at
6574 * the CPU before the status block posted prior to the interrupt.
6575 * Reading the PCI State register will confirm whether the
6576 * interrupt is ours and will flush the status block.
6578 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6579 if (tg3_flag(tp, CHIP_RESETTING) ||
6580 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6587 * Writing any value to intr-mbox-0 clears PCI INTA# and
6588 * chip-internal interrupt pending events.
6589 * Writing non-zero to intr-mbox-0 additional tells the
6590 * NIC to stop sending us irqs, engaging "in-intr-handler"
6593 * Flush the mailbox to de-assert the IRQ immediately to prevent
6594 * spurious interrupts. The flush impacts performance but
6595 * excessive spurious interrupts can be worse in some cases.
6597 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6598 if (tg3_irq_sync(tp))
6600 sblk->status &= ~SD_STATUS_UPDATED;
6601 if (likely(tg3_has_work(tnapi))) {
6602 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6603 napi_schedule(&tnapi->napi);
6605 /* No work, shared interrupt perhaps? re-enable
6606 * interrupts, and flush that PCI write
6608 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6612 return IRQ_RETVAL(handled);
6615 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6617 struct tg3_napi *tnapi = dev_id;
6618 struct tg3 *tp = tnapi->tp;
6619 struct tg3_hw_status *sblk = tnapi->hw_status;
6620 unsigned int handled = 1;
6622 /* In INTx mode, it is possible for the interrupt to arrive at
6623 * the CPU before the status block posted prior to the interrupt.
6624 * Reading the PCI State register will confirm whether the
6625 * interrupt is ours and will flush the status block.
6627 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6628 if (tg3_flag(tp, CHIP_RESETTING) ||
6629 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6636 * writing any value to intr-mbox-0 clears PCI INTA# and
6637 * chip-internal interrupt pending events.
6638 * writing non-zero to intr-mbox-0 additional tells the
6639 * NIC to stop sending us irqs, engaging "in-intr-handler"
6642 * Flush the mailbox to de-assert the IRQ immediately to prevent
6643 * spurious interrupts. The flush impacts performance but
6644 * excessive spurious interrupts can be worse in some cases.
6646 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6649 * In a shared interrupt configuration, sometimes other devices'
6650 * interrupts will scream. We record the current status tag here
6651 * so that the above check can report that the screaming interrupts
6652 * are unhandled. Eventually they will be silenced.
6654 tnapi->last_irq_tag = sblk->status_tag;
6656 if (tg3_irq_sync(tp))
6659 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6661 napi_schedule(&tnapi->napi);
6664 return IRQ_RETVAL(handled);
6667 /* ISR for interrupt test */
6668 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6670 struct tg3_napi *tnapi = dev_id;
6671 struct tg3 *tp = tnapi->tp;
6672 struct tg3_hw_status *sblk = tnapi->hw_status;
6674 if ((sblk->status & SD_STATUS_UPDATED) ||
6675 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6676 tg3_disable_ints(tp);
6677 return IRQ_RETVAL(1);
6679 return IRQ_RETVAL(0);
6682 #ifdef CONFIG_NET_POLL_CONTROLLER
6683 static void tg3_poll_controller(struct net_device *dev)
6686 struct tg3 *tp = netdev_priv(dev);
6688 for (i = 0; i < tp->irq_cnt; i++)
6689 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6693 static void tg3_tx_timeout(struct net_device *dev)
6695 struct tg3 *tp = netdev_priv(dev);
6697 if (netif_msg_tx_err(tp)) {
6698 netdev_err(dev, "transmit timed out, resetting\n");
6702 tg3_reset_task_schedule(tp);
6705 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6706 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6708 u32 base = (u32) mapping & 0xffffffff;
6710 return (base > 0xffffdcc0) && (base + len + 8 < base);
6713 /* Test for DMA addresses > 40-bit */
6714 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6717 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6718 if (tg3_flag(tp, 40BIT_DMA_BUG))
6719 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6726 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6727 dma_addr_t mapping, u32 len, u32 flags,
6730 txbd->addr_hi = ((u64) mapping >> 32);
6731 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6732 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6733 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6736 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6737 dma_addr_t map, u32 len, u32 flags,
6740 struct tg3 *tp = tnapi->tp;
6743 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6746 if (tg3_4g_overflow_test(map, len))
6749 if (tg3_40bit_overflow_test(tp, map, len))
6752 if (tp->dma_limit) {
6753 u32 prvidx = *entry;
6754 u32 tmp_flag = flags & ~TXD_FLAG_END;
6755 while (len > tp->dma_limit && *budget) {
6756 u32 frag_len = tp->dma_limit;
6757 len -= tp->dma_limit;
6759 /* Avoid the 8byte DMA problem */
6761 len += tp->dma_limit / 2;
6762 frag_len = tp->dma_limit / 2;
6765 tnapi->tx_buffers[*entry].fragmented = true;
6767 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6768 frag_len, tmp_flag, mss, vlan);
6771 *entry = NEXT_TX(*entry);
6778 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6779 len, flags, mss, vlan);
6781 *entry = NEXT_TX(*entry);
6784 tnapi->tx_buffers[prvidx].fragmented = false;
6788 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6789 len, flags, mss, vlan);
6790 *entry = NEXT_TX(*entry);
6796 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6799 struct sk_buff *skb;
6800 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6805 pci_unmap_single(tnapi->tp->pdev,
6806 dma_unmap_addr(txb, mapping),
6810 while (txb->fragmented) {
6811 txb->fragmented = false;
6812 entry = NEXT_TX(entry);
6813 txb = &tnapi->tx_buffers[entry];
6816 for (i = 0; i <= last; i++) {
6817 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6819 entry = NEXT_TX(entry);
6820 txb = &tnapi->tx_buffers[entry];
6822 pci_unmap_page(tnapi->tp->pdev,
6823 dma_unmap_addr(txb, mapping),
6824 skb_frag_size(frag), PCI_DMA_TODEVICE);
6826 while (txb->fragmented) {
6827 txb->fragmented = false;
6828 entry = NEXT_TX(entry);
6829 txb = &tnapi->tx_buffers[entry];
6834 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6835 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6836 struct sk_buff **pskb,
6837 u32 *entry, u32 *budget,
6838 u32 base_flags, u32 mss, u32 vlan)
6840 struct tg3 *tp = tnapi->tp;
6841 struct sk_buff *new_skb, *skb = *pskb;
6842 dma_addr_t new_addr = 0;
6845 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6846 new_skb = skb_copy(skb, GFP_ATOMIC);
6848 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6850 new_skb = skb_copy_expand(skb,
6851 skb_headroom(skb) + more_headroom,
6852 skb_tailroom(skb), GFP_ATOMIC);
6858 /* New SKB is guaranteed to be linear. */
6859 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6861 /* Make sure the mapping succeeded */
6862 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6863 dev_kfree_skb(new_skb);
6866 u32 save_entry = *entry;
6868 base_flags |= TXD_FLAG_END;
6870 tnapi->tx_buffers[*entry].skb = new_skb;
6871 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6874 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6875 new_skb->len, base_flags,
6877 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6878 dev_kfree_skb(new_skb);
6889 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6891 /* Use GSO to workaround a rare TSO bug that may be triggered when the
6892 * TSO header is greater than 80 bytes.
6894 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6896 struct sk_buff *segs, *nskb;
6897 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6899 /* Estimate the number of fragments in the worst case */
6900 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6901 netif_stop_queue(tp->dev);
6903 /* netif_tx_stop_queue() must be done before checking
6904 * checking tx index in tg3_tx_avail() below, because in
6905 * tg3_tx(), we update tx index before checking for
6906 * netif_tx_queue_stopped().
6909 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6910 return NETDEV_TX_BUSY;
6912 netif_wake_queue(tp->dev);
6915 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6917 goto tg3_tso_bug_end;
6923 tg3_start_xmit(nskb, tp->dev);
6929 return NETDEV_TX_OK;
6932 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6933 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6935 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6937 struct tg3 *tp = netdev_priv(dev);
6938 u32 len, entry, base_flags, mss, vlan = 0;
6940 int i = -1, would_hit_hwbug;
6942 struct tg3_napi *tnapi;
6943 struct netdev_queue *txq;
6946 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6947 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6948 if (tg3_flag(tp, ENABLE_TSS))
6951 budget = tg3_tx_avail(tnapi);
6953 /* We are running in BH disabled context with netif_tx_lock
6954 * and TX reclaim runs via tp->napi.poll inside of a software
6955 * interrupt. Furthermore, IRQ processing runs lockless so we have
6956 * no IRQ context deadlocks to worry about either. Rejoice!
6958 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6959 if (!netif_tx_queue_stopped(txq)) {
6960 netif_tx_stop_queue(txq);
6962 /* This is a hard error, log it. */
6964 "BUG! Tx Ring full when queue awake!\n");
6966 return NETDEV_TX_BUSY;
6969 entry = tnapi->tx_prod;
6971 if (skb->ip_summed == CHECKSUM_PARTIAL)
6972 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6974 mss = skb_shinfo(skb)->gso_size;
6977 u32 tcp_opt_len, hdr_len;
6979 if (skb_header_cloned(skb) &&
6980 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6984 tcp_opt_len = tcp_optlen(skb);
6986 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6988 if (!skb_is_gso_v6(skb)) {
6990 iph->tot_len = htons(mss + hdr_len);
6993 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6994 tg3_flag(tp, TSO_BUG))
6995 return tg3_tso_bug(tp, skb);
6997 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6998 TXD_FLAG_CPU_POST_DMA);
7000 if (tg3_flag(tp, HW_TSO_1) ||
7001 tg3_flag(tp, HW_TSO_2) ||
7002 tg3_flag(tp, HW_TSO_3)) {
7003 tcp_hdr(skb)->check = 0;
7004 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7006 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7011 if (tg3_flag(tp, HW_TSO_3)) {
7012 mss |= (hdr_len & 0xc) << 12;
7014 base_flags |= 0x00000010;
7015 base_flags |= (hdr_len & 0x3e0) << 5;
7016 } else if (tg3_flag(tp, HW_TSO_2))
7017 mss |= hdr_len << 9;
7018 else if (tg3_flag(tp, HW_TSO_1) ||
7019 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7020 if (tcp_opt_len || iph->ihl > 5) {
7023 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7024 mss |= (tsflags << 11);
7027 if (tcp_opt_len || iph->ihl > 5) {
7030 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7031 base_flags |= tsflags << 12;
7036 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7037 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7038 base_flags |= TXD_FLAG_JMB_PKT;
7040 if (vlan_tx_tag_present(skb)) {
7041 base_flags |= TXD_FLAG_VLAN;
7042 vlan = vlan_tx_tag_get(skb);
7045 len = skb_headlen(skb);
7047 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7048 if (pci_dma_mapping_error(tp->pdev, mapping))
7052 tnapi->tx_buffers[entry].skb = skb;
7053 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7055 would_hit_hwbug = 0;
7057 if (tg3_flag(tp, 5701_DMA_BUG))
7058 would_hit_hwbug = 1;
7060 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7061 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7063 would_hit_hwbug = 1;
7064 } else if (skb_shinfo(skb)->nr_frags > 0) {
7067 if (!tg3_flag(tp, HW_TSO_1) &&
7068 !tg3_flag(tp, HW_TSO_2) &&
7069 !tg3_flag(tp, HW_TSO_3))
7072 /* Now loop through additional data
7073 * fragments, and queue them.
7075 last = skb_shinfo(skb)->nr_frags - 1;
7076 for (i = 0; i <= last; i++) {
7077 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7079 len = skb_frag_size(frag);
7080 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7081 len, DMA_TO_DEVICE);
7083 tnapi->tx_buffers[entry].skb = NULL;
7084 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7086 if (dma_mapping_error(&tp->pdev->dev, mapping))
7090 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7092 ((i == last) ? TXD_FLAG_END : 0),
7094 would_hit_hwbug = 1;
7100 if (would_hit_hwbug) {
7101 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7103 /* If the workaround fails due to memory/mapping
7104 * failure, silently drop this packet.
7106 entry = tnapi->tx_prod;
7107 budget = tg3_tx_avail(tnapi);
7108 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7109 base_flags, mss, vlan))
7113 skb_tx_timestamp(skb);
7114 netdev_tx_sent_queue(txq, skb->len);
7116 /* Sync BD data before updating mailbox */
7119 /* Packets are ready, update Tx producer idx local and on card. */
7120 tw32_tx_mbox(tnapi->prodmbox, entry);
7122 tnapi->tx_prod = entry;
7123 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7124 netif_tx_stop_queue(txq);
7126 /* netif_tx_stop_queue() must be done before checking
7127 * checking tx index in tg3_tx_avail() below, because in
7128 * tg3_tx(), we update tx index before checking for
7129 * netif_tx_queue_stopped().
7132 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7133 netif_tx_wake_queue(txq);
7137 return NETDEV_TX_OK;
7140 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7141 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7146 return NETDEV_TX_OK;
7149 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7152 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7153 MAC_MODE_PORT_MODE_MASK);
7155 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7157 if (!tg3_flag(tp, 5705_PLUS))
7158 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7160 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7161 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7163 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7165 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7167 if (tg3_flag(tp, 5705_PLUS) ||
7168 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7170 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7173 tw32(MAC_MODE, tp->mac_mode);
7177 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7179 u32 val, bmcr, mac_mode, ptest = 0;
7181 tg3_phy_toggle_apd(tp, false);
7182 tg3_phy_toggle_automdix(tp, 0);
7184 if (extlpbk && tg3_phy_set_extloopbk(tp))
7187 bmcr = BMCR_FULLDPLX;
7192 bmcr |= BMCR_SPEED100;
7196 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7198 bmcr |= BMCR_SPEED100;
7201 bmcr |= BMCR_SPEED1000;
7206 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7207 tg3_readphy(tp, MII_CTRL1000, &val);
7208 val |= CTL1000_AS_MASTER |
7209 CTL1000_ENABLE_MASTER;
7210 tg3_writephy(tp, MII_CTRL1000, val);
7212 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7213 MII_TG3_FET_PTEST_TRIM_2;
7214 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7217 bmcr |= BMCR_LOOPBACK;
7219 tg3_writephy(tp, MII_BMCR, bmcr);
7221 /* The write needs to be flushed for the FETs */
7222 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7223 tg3_readphy(tp, MII_BMCR, &bmcr);
7227 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7228 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7229 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7230 MII_TG3_FET_PTEST_FRC_TX_LINK |
7231 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7233 /* The write needs to be flushed for the AC131 */
7234 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7237 /* Reset to prevent losing 1st rx packet intermittently */
7238 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7239 tg3_flag(tp, 5780_CLASS)) {
7240 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7242 tw32_f(MAC_RX_MODE, tp->rx_mode);
7245 mac_mode = tp->mac_mode &
7246 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7247 if (speed == SPEED_1000)
7248 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7250 mac_mode |= MAC_MODE_PORT_MODE_MII;
7252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7253 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7255 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7256 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7257 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7258 mac_mode |= MAC_MODE_LINK_POLARITY;
7260 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7261 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7264 tw32(MAC_MODE, mac_mode);
7270 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7272 struct tg3 *tp = netdev_priv(dev);
7274 if (features & NETIF_F_LOOPBACK) {
7275 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7278 spin_lock_bh(&tp->lock);
7279 tg3_mac_loopback(tp, true);
7280 netif_carrier_on(tp->dev);
7281 spin_unlock_bh(&tp->lock);
7282 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7284 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7287 spin_lock_bh(&tp->lock);
7288 tg3_mac_loopback(tp, false);
7289 /* Force link status check */
7290 tg3_setup_phy(tp, 1);
7291 spin_unlock_bh(&tp->lock);
7292 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7296 static netdev_features_t tg3_fix_features(struct net_device *dev,
7297 netdev_features_t features)
7299 struct tg3 *tp = netdev_priv(dev);
7301 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7302 features &= ~NETIF_F_ALL_TSO;
7307 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7309 netdev_features_t changed = dev->features ^ features;
7311 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7312 tg3_set_loopback(dev, features);
7317 static void tg3_rx_prodring_free(struct tg3 *tp,
7318 struct tg3_rx_prodring_set *tpr)
7322 if (tpr != &tp->napi[0].prodring) {
7323 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7324 i = (i + 1) & tp->rx_std_ring_mask)
7325 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7328 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7329 for (i = tpr->rx_jmb_cons_idx;
7330 i != tpr->rx_jmb_prod_idx;
7331 i = (i + 1) & tp->rx_jmb_ring_mask) {
7332 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7340 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7341 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7344 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7345 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7346 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7351 /* Initialize rx rings for packet processing.
7353 * The chip has been shut down and the driver detached from
7354 * the networking, so no interrupts or new tx packets will
7355 * end up in the driver. tp->{tx,}lock are held and thus
7358 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7359 struct tg3_rx_prodring_set *tpr)
7361 u32 i, rx_pkt_dma_sz;
7363 tpr->rx_std_cons_idx = 0;
7364 tpr->rx_std_prod_idx = 0;
7365 tpr->rx_jmb_cons_idx = 0;
7366 tpr->rx_jmb_prod_idx = 0;
7368 if (tpr != &tp->napi[0].prodring) {
7369 memset(&tpr->rx_std_buffers[0], 0,
7370 TG3_RX_STD_BUFF_RING_SIZE(tp));
7371 if (tpr->rx_jmb_buffers)
7372 memset(&tpr->rx_jmb_buffers[0], 0,
7373 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7377 /* Zero out all descriptors. */
7378 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7380 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7381 if (tg3_flag(tp, 5780_CLASS) &&
7382 tp->dev->mtu > ETH_DATA_LEN)
7383 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7384 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7386 /* Initialize invariants of the rings, we only set this
7387 * stuff once. This works because the card does not
7388 * write into the rx buffer posting rings.
7390 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7391 struct tg3_rx_buffer_desc *rxd;
7393 rxd = &tpr->rx_std[i];
7394 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7395 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7396 rxd->opaque = (RXD_OPAQUE_RING_STD |
7397 (i << RXD_OPAQUE_INDEX_SHIFT));
7400 /* Now allocate fresh SKBs for each rx ring. */
7401 for (i = 0; i < tp->rx_pending; i++) {
7402 unsigned int frag_size;
7404 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7406 netdev_warn(tp->dev,
7407 "Using a smaller RX standard ring. Only "
7408 "%d out of %d buffers were allocated "
7409 "successfully\n", i, tp->rx_pending);
7417 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7420 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7422 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7425 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7426 struct tg3_rx_buffer_desc *rxd;
7428 rxd = &tpr->rx_jmb[i].std;
7429 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7430 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7432 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7433 (i << RXD_OPAQUE_INDEX_SHIFT));
7436 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7437 unsigned int frag_size;
7439 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7441 netdev_warn(tp->dev,
7442 "Using a smaller RX jumbo ring. Only %d "
7443 "out of %d buffers were allocated "
7444 "successfully\n", i, tp->rx_jumbo_pending);
7447 tp->rx_jumbo_pending = i;
7456 tg3_rx_prodring_free(tp, tpr);
7460 static void tg3_rx_prodring_fini(struct tg3 *tp,
7461 struct tg3_rx_prodring_set *tpr)
7463 kfree(tpr->rx_std_buffers);
7464 tpr->rx_std_buffers = NULL;
7465 kfree(tpr->rx_jmb_buffers);
7466 tpr->rx_jmb_buffers = NULL;
7468 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7469 tpr->rx_std, tpr->rx_std_mapping);
7473 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7474 tpr->rx_jmb, tpr->rx_jmb_mapping);
7479 static int tg3_rx_prodring_init(struct tg3 *tp,
7480 struct tg3_rx_prodring_set *tpr)
7482 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7484 if (!tpr->rx_std_buffers)
7487 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7488 TG3_RX_STD_RING_BYTES(tp),
7489 &tpr->rx_std_mapping,
7494 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7495 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7497 if (!tpr->rx_jmb_buffers)
7500 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7501 TG3_RX_JMB_RING_BYTES(tp),
7502 &tpr->rx_jmb_mapping,
7511 tg3_rx_prodring_fini(tp, tpr);
7515 /* Free up pending packets in all rx/tx rings.
7517 * The chip has been shut down and the driver detached from
7518 * the networking, so no interrupts or new tx packets will
7519 * end up in the driver. tp->{tx,}lock is not held and we are not
7520 * in an interrupt context and thus may sleep.
7522 static void tg3_free_rings(struct tg3 *tp)
7526 for (j = 0; j < tp->irq_cnt; j++) {
7527 struct tg3_napi *tnapi = &tp->napi[j];
7529 tg3_rx_prodring_free(tp, &tnapi->prodring);
7531 if (!tnapi->tx_buffers)
7534 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7535 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7540 tg3_tx_skb_unmap(tnapi, i,
7541 skb_shinfo(skb)->nr_frags - 1);
7543 dev_kfree_skb_any(skb);
7545 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7549 /* Initialize tx/rx rings for packet processing.
7551 * The chip has been shut down and the driver detached from
7552 * the networking, so no interrupts or new tx packets will
7553 * end up in the driver. tp->{tx,}lock are held and thus
7556 static int tg3_init_rings(struct tg3 *tp)
7560 /* Free up all the SKBs. */
7563 for (i = 0; i < tp->irq_cnt; i++) {
7564 struct tg3_napi *tnapi = &tp->napi[i];
7566 tnapi->last_tag = 0;
7567 tnapi->last_irq_tag = 0;
7568 tnapi->hw_status->status = 0;
7569 tnapi->hw_status->status_tag = 0;
7570 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7575 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7577 tnapi->rx_rcb_ptr = 0;
7579 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7581 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7591 * Must not be invoked with interrupt sources disabled and
7592 * the hardware shutdown down.
7594 static void tg3_free_consistent(struct tg3 *tp)
7598 for (i = 0; i < tp->irq_cnt; i++) {
7599 struct tg3_napi *tnapi = &tp->napi[i];
7601 if (tnapi->tx_ring) {
7602 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7603 tnapi->tx_ring, tnapi->tx_desc_mapping);
7604 tnapi->tx_ring = NULL;
7607 kfree(tnapi->tx_buffers);
7608 tnapi->tx_buffers = NULL;
7610 if (tnapi->rx_rcb) {
7611 dma_free_coherent(&tp->pdev->dev,
7612 TG3_RX_RCB_RING_BYTES(tp),
7614 tnapi->rx_rcb_mapping);
7615 tnapi->rx_rcb = NULL;
7618 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7620 if (tnapi->hw_status) {
7621 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7623 tnapi->status_mapping);
7624 tnapi->hw_status = NULL;
7629 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7630 tp->hw_stats, tp->stats_mapping);
7631 tp->hw_stats = NULL;
7636 * Must not be invoked with interrupt sources disabled and
7637 * the hardware shutdown down. Can sleep.
7639 static int tg3_alloc_consistent(struct tg3 *tp)
7643 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7644 sizeof(struct tg3_hw_stats),
7650 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7652 for (i = 0; i < tp->irq_cnt; i++) {
7653 struct tg3_napi *tnapi = &tp->napi[i];
7654 struct tg3_hw_status *sblk;
7656 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7658 &tnapi->status_mapping,
7660 if (!tnapi->hw_status)
7663 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7664 sblk = tnapi->hw_status;
7666 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7669 /* If multivector TSS is enabled, vector 0 does not handle
7670 * tx interrupts. Don't allocate any resources for it.
7672 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7673 (i && tg3_flag(tp, ENABLE_TSS))) {
7674 tnapi->tx_buffers = kzalloc(
7675 sizeof(struct tg3_tx_ring_info) *
7676 TG3_TX_RING_SIZE, GFP_KERNEL);
7677 if (!tnapi->tx_buffers)
7680 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7682 &tnapi->tx_desc_mapping,
7684 if (!tnapi->tx_ring)
7689 * When RSS is enabled, the status block format changes
7690 * slightly. The "rx_jumbo_consumer", "reserved",
7691 * and "rx_mini_consumer" members get mapped to the
7692 * other three rx return ring producer indexes.
7696 if (tg3_flag(tp, ENABLE_RSS)) {
7697 tnapi->rx_rcb_prod_idx = NULL;
7702 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7705 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7708 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7711 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7716 * If multivector RSS is enabled, vector 0 does not handle
7717 * rx or tx interrupts. Don't allocate any resources for it.
7719 if (!i && tg3_flag(tp, ENABLE_RSS))
7722 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7723 TG3_RX_RCB_RING_BYTES(tp),
7724 &tnapi->rx_rcb_mapping,
7729 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7735 tg3_free_consistent(tp);
7739 #define MAX_WAIT_CNT 1000
7741 /* To stop a block, clear the enable bit and poll till it
7742 * clears. tp->lock is held.
7744 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7749 if (tg3_flag(tp, 5705_PLUS)) {
7756 /* We can't enable/disable these bits of the
7757 * 5705/5750, just say success.
7770 for (i = 0; i < MAX_WAIT_CNT; i++) {
7773 if ((val & enable_bit) == 0)
7777 if (i == MAX_WAIT_CNT && !silent) {
7778 dev_err(&tp->pdev->dev,
7779 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7787 /* tp->lock is held. */
7788 static int tg3_abort_hw(struct tg3 *tp, int silent)
7792 tg3_disable_ints(tp);
7794 tp->rx_mode &= ~RX_MODE_ENABLE;
7795 tw32_f(MAC_RX_MODE, tp->rx_mode);
7798 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7799 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7800 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7801 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7802 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7803 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7805 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7806 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7807 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7808 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7809 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7810 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7811 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7813 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7814 tw32_f(MAC_MODE, tp->mac_mode);
7817 tp->tx_mode &= ~TX_MODE_ENABLE;
7818 tw32_f(MAC_TX_MODE, tp->tx_mode);
7820 for (i = 0; i < MAX_WAIT_CNT; i++) {
7822 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7825 if (i >= MAX_WAIT_CNT) {
7826 dev_err(&tp->pdev->dev,
7827 "%s timed out, TX_MODE_ENABLE will not clear "
7828 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7832 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7833 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7834 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7836 tw32(FTQ_RESET, 0xffffffff);
7837 tw32(FTQ_RESET, 0x00000000);
7839 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7840 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7842 for (i = 0; i < tp->irq_cnt; i++) {
7843 struct tg3_napi *tnapi = &tp->napi[i];
7844 if (tnapi->hw_status)
7845 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7851 /* Save PCI command register before chip reset */
7852 static void tg3_save_pci_state(struct tg3 *tp)
7854 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7857 /* Restore PCI state after chip reset */
7858 static void tg3_restore_pci_state(struct tg3 *tp)
7862 /* Re-enable indirect register accesses. */
7863 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7864 tp->misc_host_ctrl);
7866 /* Set MAX PCI retry to zero. */
7867 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7868 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7869 tg3_flag(tp, PCIX_MODE))
7870 val |= PCISTATE_RETRY_SAME_DMA;
7871 /* Allow reads and writes to the APE register and memory space. */
7872 if (tg3_flag(tp, ENABLE_APE))
7873 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7874 PCISTATE_ALLOW_APE_SHMEM_WR |
7875 PCISTATE_ALLOW_APE_PSPACE_WR;
7876 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7878 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7880 if (!tg3_flag(tp, PCI_EXPRESS)) {
7881 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7882 tp->pci_cacheline_sz);
7883 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7887 /* Make sure PCI-X relaxed ordering bit is clear. */
7888 if (tg3_flag(tp, PCIX_MODE)) {
7891 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7893 pcix_cmd &= ~PCI_X_CMD_ERO;
7894 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7898 if (tg3_flag(tp, 5780_CLASS)) {
7900 /* Chip reset on 5780 will reset MSI enable bit,
7901 * so need to restore it.
7903 if (tg3_flag(tp, USING_MSI)) {
7906 pci_read_config_word(tp->pdev,
7907 tp->msi_cap + PCI_MSI_FLAGS,
7909 pci_write_config_word(tp->pdev,
7910 tp->msi_cap + PCI_MSI_FLAGS,
7911 ctrl | PCI_MSI_FLAGS_ENABLE);
7912 val = tr32(MSGINT_MODE);
7913 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7918 /* tp->lock is held. */
7919 static int tg3_chip_reset(struct tg3 *tp)
7922 void (*write_op)(struct tg3 *, u32, u32);
7927 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7929 /* No matching tg3_nvram_unlock() after this because
7930 * chip reset below will undo the nvram lock.
7932 tp->nvram_lock_cnt = 0;
7934 /* GRC_MISC_CFG core clock reset will clear the memory
7935 * enable bit in PCI register 4 and the MSI enable bit
7936 * on some chips, so we save relevant registers here.
7938 tg3_save_pci_state(tp);
7940 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7941 tg3_flag(tp, 5755_PLUS))
7942 tw32(GRC_FASTBOOT_PC, 0);
7945 * We must avoid the readl() that normally takes place.
7946 * It locks machines, causes machine checks, and other
7947 * fun things. So, temporarily disable the 5701
7948 * hardware workaround, while we do the reset.
7950 write_op = tp->write32;
7951 if (write_op == tg3_write_flush_reg32)
7952 tp->write32 = tg3_write32;
7954 /* Prevent the irq handler from reading or writing PCI registers
7955 * during chip reset when the memory enable bit in the PCI command
7956 * register may be cleared. The chip does not generate interrupt
7957 * at this time, but the irq handler may still be called due to irq
7958 * sharing or irqpoll.
7960 tg3_flag_set(tp, CHIP_RESETTING);
7961 for (i = 0; i < tp->irq_cnt; i++) {
7962 struct tg3_napi *tnapi = &tp->napi[i];
7963 if (tnapi->hw_status) {
7964 tnapi->hw_status->status = 0;
7965 tnapi->hw_status->status_tag = 0;
7967 tnapi->last_tag = 0;
7968 tnapi->last_irq_tag = 0;
7972 for (i = 0; i < tp->irq_cnt; i++)
7973 synchronize_irq(tp->napi[i].irq_vec);
7975 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7976 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7977 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7981 val = GRC_MISC_CFG_CORECLK_RESET;
7983 if (tg3_flag(tp, PCI_EXPRESS)) {
7984 /* Force PCIe 1.0a mode */
7985 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7986 !tg3_flag(tp, 57765_PLUS) &&
7987 tr32(TG3_PCIE_PHY_TSTCTL) ==
7988 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7989 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7991 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7992 tw32(GRC_MISC_CFG, (1 << 29));
7997 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7998 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7999 tw32(GRC_VCPU_EXT_CTRL,
8000 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8003 /* Manage gphy power for all CPMU absent PCIe devices. */
8004 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8005 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8007 tw32(GRC_MISC_CFG, val);
8009 /* restore 5701 hardware bug workaround write method */
8010 tp->write32 = write_op;
8012 /* Unfortunately, we have to delay before the PCI read back.
8013 * Some 575X chips even will not respond to a PCI cfg access
8014 * when the reset command is given to the chip.
8016 * How do these hardware designers expect things to work
8017 * properly if the PCI write is posted for a long period
8018 * of time? It is always necessary to have some method by
8019 * which a register read back can occur to push the write
8020 * out which does the reset.
8022 * For most tg3 variants the trick below was working.
8027 /* Flush PCI posted writes. The normal MMIO registers
8028 * are inaccessible at this time so this is the only
8029 * way to make this reliably (actually, this is no longer
8030 * the case, see above). I tried to use indirect
8031 * register read/write but this upset some 5701 variants.
8033 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8037 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
8040 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8044 /* Wait for link training to complete. */
8045 for (i = 0; i < 5000; i++)
8048 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8049 pci_write_config_dword(tp->pdev, 0xc4,
8050 cfg_val | (1 << 15));
8053 /* Clear the "no snoop" and "relaxed ordering" bits. */
8054 pci_read_config_word(tp->pdev,
8055 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8057 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
8058 PCI_EXP_DEVCTL_NOSNOOP_EN);
8060 * Older PCIe devices only support the 128 byte
8061 * MPS setting. Enforce the restriction.
8063 if (!tg3_flag(tp, CPMU_PRESENT))
8064 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
8065 pci_write_config_word(tp->pdev,
8066 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
8069 /* Clear error status */
8070 pci_write_config_word(tp->pdev,
8071 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
8072 PCI_EXP_DEVSTA_CED |
8073 PCI_EXP_DEVSTA_NFED |
8074 PCI_EXP_DEVSTA_FED |
8075 PCI_EXP_DEVSTA_URD);
8078 tg3_restore_pci_state(tp);
8080 tg3_flag_clear(tp, CHIP_RESETTING);
8081 tg3_flag_clear(tp, ERROR_PROCESSED);
8084 if (tg3_flag(tp, 5780_CLASS))
8085 val = tr32(MEMARB_MODE);
8086 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8088 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8090 tw32(0x5000, 0x400);
8093 tw32(GRC_MODE, tp->grc_mode);
8095 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8098 tw32(0xc4, val | (1 << 15));
8101 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8102 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8103 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8104 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8105 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8106 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8109 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8110 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8112 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8113 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8118 tw32_f(MAC_MODE, val);
8121 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8123 err = tg3_poll_fw(tp);
8129 if (tg3_flag(tp, PCI_EXPRESS) &&
8130 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8131 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8132 !tg3_flag(tp, 57765_PLUS)) {
8135 tw32(0x7c00, val | (1 << 25));
8138 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8139 val = tr32(TG3_CPMU_CLCK_ORIDE);
8140 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8143 /* Reprobe ASF enable state. */
8144 tg3_flag_clear(tp, ENABLE_ASF);
8145 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8146 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8147 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8150 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8151 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8152 tg3_flag_set(tp, ENABLE_ASF);
8153 tp->last_event_jiffies = jiffies;
8154 if (tg3_flag(tp, 5750_PLUS))
8155 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8162 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8163 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8165 /* tp->lock is held. */
8166 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8172 tg3_write_sig_pre_reset(tp, kind);
8174 tg3_abort_hw(tp, silent);
8175 err = tg3_chip_reset(tp);
8177 __tg3_set_mac_addr(tp, 0);
8179 tg3_write_sig_legacy(tp, kind);
8180 tg3_write_sig_post_reset(tp, kind);
8183 /* Save the stats across chip resets... */
8184 tg3_get_nstats(tp, &tp->net_stats_prev);
8185 tg3_get_estats(tp, &tp->estats_prev);
8187 /* And make sure the next sample is new data */
8188 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8197 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8199 struct tg3 *tp = netdev_priv(dev);
8200 struct sockaddr *addr = p;
8201 int err = 0, skip_mac_1 = 0;
8203 if (!is_valid_ether_addr(addr->sa_data))
8204 return -EADDRNOTAVAIL;
8206 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8208 if (!netif_running(dev))
8211 if (tg3_flag(tp, ENABLE_ASF)) {
8212 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8214 addr0_high = tr32(MAC_ADDR_0_HIGH);
8215 addr0_low = tr32(MAC_ADDR_0_LOW);
8216 addr1_high = tr32(MAC_ADDR_1_HIGH);
8217 addr1_low = tr32(MAC_ADDR_1_LOW);
8219 /* Skip MAC addr 1 if ASF is using it. */
8220 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8221 !(addr1_high == 0 && addr1_low == 0))
8224 spin_lock_bh(&tp->lock);
8225 __tg3_set_mac_addr(tp, skip_mac_1);
8226 spin_unlock_bh(&tp->lock);
8231 /* tp->lock is held. */
8232 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8233 dma_addr_t mapping, u32 maxlen_flags,
8237 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8238 ((u64) mapping >> 32));
8240 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8241 ((u64) mapping & 0xffffffff));
8243 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8246 if (!tg3_flag(tp, 5705_PLUS))
8248 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8252 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8256 if (!tg3_flag(tp, ENABLE_TSS)) {
8257 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8258 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8259 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8261 tw32(HOSTCC_TXCOL_TICKS, 0);
8262 tw32(HOSTCC_TXMAX_FRAMES, 0);
8263 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8266 if (!tg3_flag(tp, ENABLE_RSS)) {
8267 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8268 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8269 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8271 tw32(HOSTCC_RXCOL_TICKS, 0);
8272 tw32(HOSTCC_RXMAX_FRAMES, 0);
8273 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8276 if (!tg3_flag(tp, 5705_PLUS)) {
8277 u32 val = ec->stats_block_coalesce_usecs;
8279 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8280 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8282 if (!netif_carrier_ok(tp->dev))
8285 tw32(HOSTCC_STAT_COAL_TICKS, val);
8288 for (i = 0; i < tp->irq_cnt - 1; i++) {
8291 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8292 tw32(reg, ec->rx_coalesce_usecs);
8293 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8294 tw32(reg, ec->rx_max_coalesced_frames);
8295 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8296 tw32(reg, ec->rx_max_coalesced_frames_irq);
8298 if (tg3_flag(tp, ENABLE_TSS)) {
8299 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8300 tw32(reg, ec->tx_coalesce_usecs);
8301 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8302 tw32(reg, ec->tx_max_coalesced_frames);
8303 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8304 tw32(reg, ec->tx_max_coalesced_frames_irq);
8308 for (; i < tp->irq_max - 1; i++) {
8309 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8310 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8311 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8313 if (tg3_flag(tp, ENABLE_TSS)) {
8314 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8315 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8316 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8321 /* tp->lock is held. */
8322 static void tg3_rings_reset(struct tg3 *tp)
8325 u32 stblk, txrcb, rxrcb, limit;
8326 struct tg3_napi *tnapi = &tp->napi[0];
8328 /* Disable all transmit rings but the first. */
8329 if (!tg3_flag(tp, 5705_PLUS))
8330 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8331 else if (tg3_flag(tp, 5717_PLUS))
8332 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8333 else if (tg3_flag(tp, 57765_CLASS))
8334 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8336 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8338 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8339 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8340 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8341 BDINFO_FLAGS_DISABLED);
8344 /* Disable all receive return rings but the first. */
8345 if (tg3_flag(tp, 5717_PLUS))
8346 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8347 else if (!tg3_flag(tp, 5705_PLUS))
8348 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8349 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8350 tg3_flag(tp, 57765_CLASS))
8351 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8353 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8355 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8356 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8357 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8358 BDINFO_FLAGS_DISABLED);
8360 /* Disable interrupts */
8361 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8362 tp->napi[0].chk_msi_cnt = 0;
8363 tp->napi[0].last_rx_cons = 0;
8364 tp->napi[0].last_tx_cons = 0;
8366 /* Zero mailbox registers. */
8367 if (tg3_flag(tp, SUPPORT_MSIX)) {
8368 for (i = 1; i < tp->irq_max; i++) {
8369 tp->napi[i].tx_prod = 0;
8370 tp->napi[i].tx_cons = 0;
8371 if (tg3_flag(tp, ENABLE_TSS))
8372 tw32_mailbox(tp->napi[i].prodmbox, 0);
8373 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8374 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8375 tp->napi[i].chk_msi_cnt = 0;
8376 tp->napi[i].last_rx_cons = 0;
8377 tp->napi[i].last_tx_cons = 0;
8379 if (!tg3_flag(tp, ENABLE_TSS))
8380 tw32_mailbox(tp->napi[0].prodmbox, 0);
8382 tp->napi[0].tx_prod = 0;
8383 tp->napi[0].tx_cons = 0;
8384 tw32_mailbox(tp->napi[0].prodmbox, 0);
8385 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8388 /* Make sure the NIC-based send BD rings are disabled. */
8389 if (!tg3_flag(tp, 5705_PLUS)) {
8390 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8391 for (i = 0; i < 16; i++)
8392 tw32_tx_mbox(mbox + i * 8, 0);
8395 txrcb = NIC_SRAM_SEND_RCB;
8396 rxrcb = NIC_SRAM_RCV_RET_RCB;
8398 /* Clear status block in ram. */
8399 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8401 /* Set status block DMA address */
8402 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8403 ((u64) tnapi->status_mapping >> 32));
8404 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8405 ((u64) tnapi->status_mapping & 0xffffffff));
8407 if (tnapi->tx_ring) {
8408 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8409 (TG3_TX_RING_SIZE <<
8410 BDINFO_FLAGS_MAXLEN_SHIFT),
8411 NIC_SRAM_TX_BUFFER_DESC);
8412 txrcb += TG3_BDINFO_SIZE;
8415 if (tnapi->rx_rcb) {
8416 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8417 (tp->rx_ret_ring_mask + 1) <<
8418 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8419 rxrcb += TG3_BDINFO_SIZE;
8422 stblk = HOSTCC_STATBLCK_RING1;
8424 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8425 u64 mapping = (u64)tnapi->status_mapping;
8426 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8427 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8429 /* Clear status block in ram. */
8430 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8432 if (tnapi->tx_ring) {
8433 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8434 (TG3_TX_RING_SIZE <<
8435 BDINFO_FLAGS_MAXLEN_SHIFT),
8436 NIC_SRAM_TX_BUFFER_DESC);
8437 txrcb += TG3_BDINFO_SIZE;
8440 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8441 ((tp->rx_ret_ring_mask + 1) <<
8442 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8445 rxrcb += TG3_BDINFO_SIZE;
8449 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8451 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8453 if (!tg3_flag(tp, 5750_PLUS) ||
8454 tg3_flag(tp, 5780_CLASS) ||
8455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8457 tg3_flag(tp, 57765_PLUS))
8458 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8459 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8460 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8461 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8463 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8465 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8466 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8468 val = min(nic_rep_thresh, host_rep_thresh);
8469 tw32(RCVBDI_STD_THRESH, val);
8471 if (tg3_flag(tp, 57765_PLUS))
8472 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8474 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8477 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8479 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8481 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8482 tw32(RCVBDI_JUMBO_THRESH, val);
8484 if (tg3_flag(tp, 57765_PLUS))
8485 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8488 static inline u32 calc_crc(unsigned char *buf, int len)
8496 for (j = 0; j < len; j++) {
8499 for (k = 0; k < 8; k++) {
8512 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8514 /* accept or reject all multicast frames */
8515 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8516 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8517 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8518 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8521 static void __tg3_set_rx_mode(struct net_device *dev)
8523 struct tg3 *tp = netdev_priv(dev);
8526 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8527 RX_MODE_KEEP_VLAN_TAG);
8529 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8530 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8533 if (!tg3_flag(tp, ENABLE_ASF))
8534 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8537 if (dev->flags & IFF_PROMISC) {
8538 /* Promiscuous mode. */
8539 rx_mode |= RX_MODE_PROMISC;
8540 } else if (dev->flags & IFF_ALLMULTI) {
8541 /* Accept all multicast. */
8542 tg3_set_multi(tp, 1);
8543 } else if (netdev_mc_empty(dev)) {
8544 /* Reject all multicast. */
8545 tg3_set_multi(tp, 0);
8547 /* Accept one or more multicast(s). */
8548 struct netdev_hw_addr *ha;
8549 u32 mc_filter[4] = { 0, };
8554 netdev_for_each_mc_addr(ha, dev) {
8555 crc = calc_crc(ha->addr, ETH_ALEN);
8557 regidx = (bit & 0x60) >> 5;
8559 mc_filter[regidx] |= (1 << bit);
8562 tw32(MAC_HASH_REG_0, mc_filter[0]);
8563 tw32(MAC_HASH_REG_1, mc_filter[1]);
8564 tw32(MAC_HASH_REG_2, mc_filter[2]);
8565 tw32(MAC_HASH_REG_3, mc_filter[3]);
8568 if (rx_mode != tp->rx_mode) {
8569 tp->rx_mode = rx_mode;
8570 tw32_f(MAC_RX_MODE, rx_mode);
8575 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8579 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8580 tp->rss_ind_tbl[i] =
8581 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8584 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8588 if (!tg3_flag(tp, SUPPORT_MSIX))
8591 if (tp->irq_cnt <= 2) {
8592 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8596 /* Validate table against current IRQ count */
8597 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8598 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8602 if (i != TG3_RSS_INDIR_TBL_SIZE)
8603 tg3_rss_init_dflt_indir_tbl(tp);
8606 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8609 u32 reg = MAC_RSS_INDIR_TBL_0;
8611 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8612 u32 val = tp->rss_ind_tbl[i];
8614 for (; i % 8; i++) {
8616 val |= tp->rss_ind_tbl[i];
8623 /* tp->lock is held. */
8624 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8626 u32 val, rdmac_mode;
8628 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8630 tg3_disable_ints(tp);
8634 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8636 if (tg3_flag(tp, INIT_COMPLETE))
8637 tg3_abort_hw(tp, 1);
8639 /* Enable MAC control of LPI */
8640 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8641 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8642 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8643 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8645 tw32_f(TG3_CPMU_EEE_CTRL,
8646 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8648 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8649 TG3_CPMU_EEEMD_LPI_IN_TX |
8650 TG3_CPMU_EEEMD_LPI_IN_RX |
8651 TG3_CPMU_EEEMD_EEE_ENABLE;
8653 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8654 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8656 if (tg3_flag(tp, ENABLE_APE))
8657 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8659 tw32_f(TG3_CPMU_EEE_MODE, val);
8661 tw32_f(TG3_CPMU_EEE_DBTMR1,
8662 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8663 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8665 tw32_f(TG3_CPMU_EEE_DBTMR2,
8666 TG3_CPMU_DBTMR2_APE_TX_2047US |
8667 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8673 err = tg3_chip_reset(tp);
8677 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8679 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8680 val = tr32(TG3_CPMU_CTRL);
8681 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8682 tw32(TG3_CPMU_CTRL, val);
8684 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8685 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8686 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8687 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8689 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8690 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8691 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8692 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8694 val = tr32(TG3_CPMU_HST_ACC);
8695 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8696 val |= CPMU_HST_ACC_MACCLK_6_25;
8697 tw32(TG3_CPMU_HST_ACC, val);
8700 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8701 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8702 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8703 PCIE_PWR_MGMT_L1_THRESH_4MS;
8704 tw32(PCIE_PWR_MGMT_THRESH, val);
8706 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8707 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8709 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8711 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8712 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8715 if (tg3_flag(tp, L1PLLPD_EN)) {
8716 u32 grc_mode = tr32(GRC_MODE);
8718 /* Access the lower 1K of PL PCIE block registers. */
8719 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8720 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8722 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8723 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8724 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8726 tw32(GRC_MODE, grc_mode);
8729 if (tg3_flag(tp, 57765_CLASS)) {
8730 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8731 u32 grc_mode = tr32(GRC_MODE);
8733 /* Access the lower 1K of PL PCIE block registers. */
8734 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8735 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8737 val = tr32(TG3_PCIE_TLDLPL_PORT +
8738 TG3_PCIE_PL_LO_PHYCTL5);
8739 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8740 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8742 tw32(GRC_MODE, grc_mode);
8745 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8746 u32 grc_mode = tr32(GRC_MODE);
8748 /* Access the lower 1K of DL PCIE block registers. */
8749 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8750 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8752 val = tr32(TG3_PCIE_TLDLPL_PORT +
8753 TG3_PCIE_DL_LO_FTSMAX);
8754 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8755 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8756 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8758 tw32(GRC_MODE, grc_mode);
8761 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8762 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8763 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8764 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8767 /* This works around an issue with Athlon chipsets on
8768 * B3 tigon3 silicon. This bit has no effect on any
8769 * other revision. But do not set this on PCI Express
8770 * chips and don't even touch the clocks if the CPMU is present.
8772 if (!tg3_flag(tp, CPMU_PRESENT)) {
8773 if (!tg3_flag(tp, PCI_EXPRESS))
8774 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8775 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8778 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8779 tg3_flag(tp, PCIX_MODE)) {
8780 val = tr32(TG3PCI_PCISTATE);
8781 val |= PCISTATE_RETRY_SAME_DMA;
8782 tw32(TG3PCI_PCISTATE, val);
8785 if (tg3_flag(tp, ENABLE_APE)) {
8786 /* Allow reads and writes to the
8787 * APE register and memory space.
8789 val = tr32(TG3PCI_PCISTATE);
8790 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8791 PCISTATE_ALLOW_APE_SHMEM_WR |
8792 PCISTATE_ALLOW_APE_PSPACE_WR;
8793 tw32(TG3PCI_PCISTATE, val);
8796 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8797 /* Enable some hw fixes. */
8798 val = tr32(TG3PCI_MSI_DATA);
8799 val |= (1 << 26) | (1 << 28) | (1 << 29);
8800 tw32(TG3PCI_MSI_DATA, val);
8803 /* Descriptor ring init may make accesses to the
8804 * NIC SRAM area to setup the TX descriptors, so we
8805 * can only do this after the hardware has been
8806 * successfully reset.
8808 err = tg3_init_rings(tp);
8812 if (tg3_flag(tp, 57765_PLUS)) {
8813 val = tr32(TG3PCI_DMA_RW_CTRL) &
8814 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8815 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8816 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8817 if (!tg3_flag(tp, 57765_CLASS) &&
8818 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8819 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8820 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8821 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8822 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8823 /* This value is determined during the probe time DMA
8824 * engine test, tg3_test_dma.
8826 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8829 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8830 GRC_MODE_4X_NIC_SEND_RINGS |
8831 GRC_MODE_NO_TX_PHDR_CSUM |
8832 GRC_MODE_NO_RX_PHDR_CSUM);
8833 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8835 /* Pseudo-header checksum is done by hardware logic and not
8836 * the offload processers, so make the chip do the pseudo-
8837 * header checksums on receive. For transmit it is more
8838 * convenient to do the pseudo-header checksum in software
8839 * as Linux does that on transmit for us in all cases.
8841 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8845 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8847 /* Setup the timer prescalar register. Clock is always 66Mhz. */
8848 val = tr32(GRC_MISC_CFG);
8850 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8851 tw32(GRC_MISC_CFG, val);
8853 /* Initialize MBUF/DESC pool. */
8854 if (tg3_flag(tp, 5750_PLUS)) {
8856 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8857 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8858 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8859 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8861 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8862 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8863 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8864 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8867 fw_len = tp->fw_len;
8868 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8869 tw32(BUFMGR_MB_POOL_ADDR,
8870 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8871 tw32(BUFMGR_MB_POOL_SIZE,
8872 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8875 if (tp->dev->mtu <= ETH_DATA_LEN) {
8876 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8877 tp->bufmgr_config.mbuf_read_dma_low_water);
8878 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8879 tp->bufmgr_config.mbuf_mac_rx_low_water);
8880 tw32(BUFMGR_MB_HIGH_WATER,
8881 tp->bufmgr_config.mbuf_high_water);
8883 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8884 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8885 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8886 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8887 tw32(BUFMGR_MB_HIGH_WATER,
8888 tp->bufmgr_config.mbuf_high_water_jumbo);
8890 tw32(BUFMGR_DMA_LOW_WATER,
8891 tp->bufmgr_config.dma_low_water);
8892 tw32(BUFMGR_DMA_HIGH_WATER,
8893 tp->bufmgr_config.dma_high_water);
8895 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8897 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8898 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8899 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8900 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8901 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8902 tw32(BUFMGR_MODE, val);
8903 for (i = 0; i < 2000; i++) {
8904 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8909 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8913 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8914 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8916 tg3_setup_rxbd_thresholds(tp);
8918 /* Initialize TG3_BDINFO's at:
8919 * RCVDBDI_STD_BD: standard eth size rx ring
8920 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8921 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8924 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8925 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8926 * ring attribute flags
8927 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8929 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8930 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8932 * The size of each ring is fixed in the firmware, but the location is
8935 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8936 ((u64) tpr->rx_std_mapping >> 32));
8937 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8938 ((u64) tpr->rx_std_mapping & 0xffffffff));
8939 if (!tg3_flag(tp, 5717_PLUS))
8940 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8941 NIC_SRAM_RX_BUFFER_DESC);
8943 /* Disable the mini ring */
8944 if (!tg3_flag(tp, 5705_PLUS))
8945 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8946 BDINFO_FLAGS_DISABLED);
8948 /* Program the jumbo buffer descriptor ring control
8949 * blocks on those devices that have them.
8951 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8952 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8954 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8955 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8956 ((u64) tpr->rx_jmb_mapping >> 32));
8957 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8958 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8959 val = TG3_RX_JMB_RING_SIZE(tp) <<
8960 BDINFO_FLAGS_MAXLEN_SHIFT;
8961 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8962 val | BDINFO_FLAGS_USE_EXT_RECV);
8963 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8964 tg3_flag(tp, 57765_CLASS))
8965 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8966 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8968 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8969 BDINFO_FLAGS_DISABLED);
8972 if (tg3_flag(tp, 57765_PLUS)) {
8973 val = TG3_RX_STD_RING_SIZE(tp);
8974 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8975 val |= (TG3_RX_STD_DMA_SZ << 2);
8977 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8979 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8981 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8983 tpr->rx_std_prod_idx = tp->rx_pending;
8984 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8986 tpr->rx_jmb_prod_idx =
8987 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8988 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8990 tg3_rings_reset(tp);
8992 /* Initialize MAC address and backoff seed. */
8993 __tg3_set_mac_addr(tp, 0);
8995 /* MTU + ethernet header + FCS + optional VLAN tag */
8996 tw32(MAC_RX_MTU_SIZE,
8997 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8999 /* The slot time is changed by tg3_setup_phy if we
9000 * run at gigabit with half duplex.
9002 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9003 (6 << TX_LENGTHS_IPG_SHIFT) |
9004 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9007 val |= tr32(MAC_TX_LENGTHS) &
9008 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9009 TX_LENGTHS_CNT_DWN_VAL_MSK);
9011 tw32(MAC_TX_LENGTHS, val);
9013 /* Receive rules. */
9014 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9015 tw32(RCVLPC_CONFIG, 0x0181);
9017 /* Calculate RDMAC_MODE setting early, we need it to determine
9018 * the RCVLPC_STATE_ENABLE mask.
9020 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9021 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9022 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9023 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9024 RDMAC_MODE_LNGREAD_ENAB);
9026 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9027 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9029 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9030 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9031 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9032 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9033 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9034 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9037 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9038 if (tg3_flag(tp, TSO_CAPABLE) &&
9039 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9040 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9041 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9042 !tg3_flag(tp, IS_5788)) {
9043 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9047 if (tg3_flag(tp, PCI_EXPRESS))
9048 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9050 if (tg3_flag(tp, HW_TSO_1) ||
9051 tg3_flag(tp, HW_TSO_2) ||
9052 tg3_flag(tp, HW_TSO_3))
9053 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9055 if (tg3_flag(tp, 57765_PLUS) ||
9056 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9057 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9058 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9060 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9061 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9063 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9064 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9065 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9066 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9067 tg3_flag(tp, 57765_PLUS)) {
9068 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9069 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9070 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9071 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9072 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9073 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9074 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9075 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9076 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9078 tw32(TG3_RDMA_RSRVCTRL_REG,
9079 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9082 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9083 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9084 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9085 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9086 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9087 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9090 /* Receive/send statistics. */
9091 if (tg3_flag(tp, 5750_PLUS)) {
9092 val = tr32(RCVLPC_STATS_ENABLE);
9093 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9094 tw32(RCVLPC_STATS_ENABLE, val);
9095 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9096 tg3_flag(tp, TSO_CAPABLE)) {
9097 val = tr32(RCVLPC_STATS_ENABLE);
9098 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9099 tw32(RCVLPC_STATS_ENABLE, val);
9101 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9103 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9104 tw32(SNDDATAI_STATSENAB, 0xffffff);
9105 tw32(SNDDATAI_STATSCTRL,
9106 (SNDDATAI_SCTRL_ENABLE |
9107 SNDDATAI_SCTRL_FASTUPD));
9109 /* Setup host coalescing engine. */
9110 tw32(HOSTCC_MODE, 0);
9111 for (i = 0; i < 2000; i++) {
9112 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9117 __tg3_set_coalesce(tp, &tp->coal);
9119 if (!tg3_flag(tp, 5705_PLUS)) {
9120 /* Status/statistics block address. See tg3_timer,
9121 * the tg3_periodic_fetch_stats call there, and
9122 * tg3_get_stats to see how this works for 5705/5750 chips.
9124 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9125 ((u64) tp->stats_mapping >> 32));
9126 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9127 ((u64) tp->stats_mapping & 0xffffffff));
9128 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9130 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9132 /* Clear statistics and status block memory areas */
9133 for (i = NIC_SRAM_STATS_BLK;
9134 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9136 tg3_write_mem(tp, i, 0);
9141 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9143 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9144 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9145 if (!tg3_flag(tp, 5705_PLUS))
9146 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9148 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9149 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9150 /* reset to prevent losing 1st rx packet intermittently */
9151 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9155 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9156 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9157 MAC_MODE_FHDE_ENABLE;
9158 if (tg3_flag(tp, ENABLE_APE))
9159 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9160 if (!tg3_flag(tp, 5705_PLUS) &&
9161 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9162 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9163 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9164 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9167 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9168 * If TG3_FLAG_IS_NIC is zero, we should read the
9169 * register to preserve the GPIO settings for LOMs. The GPIOs,
9170 * whether used as inputs or outputs, are set by boot code after
9173 if (!tg3_flag(tp, IS_NIC)) {
9176 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9177 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9178 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9180 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9181 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9182 GRC_LCLCTRL_GPIO_OUTPUT3;
9184 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9185 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9187 tp->grc_local_ctrl &= ~gpio_mask;
9188 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9190 /* GPIO1 must be driven high for eeprom write protect */
9191 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9192 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9193 GRC_LCLCTRL_GPIO_OUTPUT1);
9195 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9198 if (tg3_flag(tp, USING_MSIX)) {
9199 val = tr32(MSGINT_MODE);
9200 val |= MSGINT_MODE_ENABLE;
9201 if (tp->irq_cnt > 1)
9202 val |= MSGINT_MODE_MULTIVEC_EN;
9203 if (!tg3_flag(tp, 1SHOT_MSI))
9204 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9205 tw32(MSGINT_MODE, val);
9208 if (!tg3_flag(tp, 5705_PLUS)) {
9209 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9213 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9214 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9215 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9216 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9217 WDMAC_MODE_LNGREAD_ENAB);
9219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9220 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9221 if (tg3_flag(tp, TSO_CAPABLE) &&
9222 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9223 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9225 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9226 !tg3_flag(tp, IS_5788)) {
9227 val |= WDMAC_MODE_RX_ACCEL;
9231 /* Enable host coalescing bug fix */
9232 if (tg3_flag(tp, 5755_PLUS))
9233 val |= WDMAC_MODE_STATUS_TAG_FIX;
9235 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9236 val |= WDMAC_MODE_BURST_ALL_DATA;
9238 tw32_f(WDMAC_MODE, val);
9241 if (tg3_flag(tp, PCIX_MODE)) {
9244 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9246 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9247 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9248 pcix_cmd |= PCI_X_CMD_READ_2K;
9249 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9250 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9251 pcix_cmd |= PCI_X_CMD_READ_2K;
9253 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9257 tw32_f(RDMAC_MODE, rdmac_mode);
9260 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9261 if (!tg3_flag(tp, 5705_PLUS))
9262 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9264 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9266 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9268 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9270 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9271 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9272 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9273 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9274 val |= RCVDBDI_MODE_LRG_RING_SZ;
9275 tw32(RCVDBDI_MODE, val);
9276 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9277 if (tg3_flag(tp, HW_TSO_1) ||
9278 tg3_flag(tp, HW_TSO_2) ||
9279 tg3_flag(tp, HW_TSO_3))
9280 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9281 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9282 if (tg3_flag(tp, ENABLE_TSS))
9283 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9284 tw32(SNDBDI_MODE, val);
9285 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9287 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9288 err = tg3_load_5701_a0_firmware_fix(tp);
9293 if (tg3_flag(tp, TSO_CAPABLE)) {
9294 err = tg3_load_tso_firmware(tp);
9299 tp->tx_mode = TX_MODE_ENABLE;
9301 if (tg3_flag(tp, 5755_PLUS) ||
9302 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9303 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9305 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9306 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9307 tp->tx_mode &= ~val;
9308 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9311 tw32_f(MAC_TX_MODE, tp->tx_mode);
9314 if (tg3_flag(tp, ENABLE_RSS)) {
9315 tg3_rss_write_indir_tbl(tp);
9317 /* Setup the "secret" hash key. */
9318 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9319 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9320 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9321 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9322 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9323 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9324 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9325 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9326 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9327 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9330 tp->rx_mode = RX_MODE_ENABLE;
9331 if (tg3_flag(tp, 5755_PLUS))
9332 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9334 if (tg3_flag(tp, ENABLE_RSS))
9335 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9336 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9337 RX_MODE_RSS_IPV6_HASH_EN |
9338 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9339 RX_MODE_RSS_IPV4_HASH_EN |
9340 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9342 tw32_f(MAC_RX_MODE, tp->rx_mode);
9345 tw32(MAC_LED_CTRL, tp->led_ctrl);
9347 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9348 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9349 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9352 tw32_f(MAC_RX_MODE, tp->rx_mode);
9355 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9356 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9357 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9358 /* Set drive transmission level to 1.2V */
9359 /* only if the signal pre-emphasis bit is not set */
9360 val = tr32(MAC_SERDES_CFG);
9363 tw32(MAC_SERDES_CFG, val);
9365 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9366 tw32(MAC_SERDES_CFG, 0x616000);
9369 /* Prevent chip from dropping frames when flow control
9372 if (tg3_flag(tp, 57765_CLASS))
9376 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9378 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9379 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9380 /* Use hardware link auto-negotiation */
9381 tg3_flag_set(tp, HW_AUTONEG);
9384 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9385 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9388 tmp = tr32(SERDES_RX_CTRL);
9389 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9390 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9391 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9392 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9395 if (!tg3_flag(tp, USE_PHYLIB)) {
9396 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9397 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9399 err = tg3_setup_phy(tp, 0);
9403 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9404 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9407 /* Clear CRC stats. */
9408 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9409 tg3_writephy(tp, MII_TG3_TEST1,
9410 tmp | MII_TG3_TEST1_CRC_EN);
9411 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9416 __tg3_set_rx_mode(tp->dev);
9418 /* Initialize receive rules. */
9419 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9420 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9421 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9422 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9424 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9428 if (tg3_flag(tp, ENABLE_ASF))
9432 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9434 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9436 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9438 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9440 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9442 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9444 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9446 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9448 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9450 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9452 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9454 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9456 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9458 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9466 if (tg3_flag(tp, ENABLE_APE))
9467 /* Write our heartbeat update interval to APE. */
9468 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9469 APE_HOST_HEARTBEAT_INT_DISABLE);
9471 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9476 /* Called at device open time to get the chip ready for
9477 * packet processing. Invoked with tp->lock held.
9479 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9481 tg3_switch_clocks(tp);
9483 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9485 return tg3_reset_hw(tp, reset_phy);
9488 #if IS_ENABLED(CONFIG_HWMON)
9489 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9493 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9494 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9496 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9499 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9500 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9501 memset(ocir, 0, TG3_OCIR_LEN);
9505 /* sysfs attributes for hwmon */
9506 static ssize_t tg3_show_temp(struct device *dev,
9507 struct device_attribute *devattr, char *buf)
9509 struct pci_dev *pdev = to_pci_dev(dev);
9510 struct net_device *netdev = pci_get_drvdata(pdev);
9511 struct tg3 *tp = netdev_priv(netdev);
9512 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9515 spin_lock_bh(&tp->lock);
9516 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9517 sizeof(temperature));
9518 spin_unlock_bh(&tp->lock);
9519 return sprintf(buf, "%u\n", temperature);
9523 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9524 TG3_TEMP_SENSOR_OFFSET);
9525 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9526 TG3_TEMP_CAUTION_OFFSET);
9527 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9528 TG3_TEMP_MAX_OFFSET);
9530 static struct attribute *tg3_attributes[] = {
9531 &sensor_dev_attr_temp1_input.dev_attr.attr,
9532 &sensor_dev_attr_temp1_crit.dev_attr.attr,
9533 &sensor_dev_attr_temp1_max.dev_attr.attr,
9537 static const struct attribute_group tg3_group = {
9538 .attrs = tg3_attributes,
9543 static void tg3_hwmon_close(struct tg3 *tp)
9545 #if IS_ENABLED(CONFIG_HWMON)
9546 if (tp->hwmon_dev) {
9547 hwmon_device_unregister(tp->hwmon_dev);
9548 tp->hwmon_dev = NULL;
9549 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9554 static void tg3_hwmon_open(struct tg3 *tp)
9556 #if IS_ENABLED(CONFIG_HWMON)
9559 struct pci_dev *pdev = tp->pdev;
9560 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9562 tg3_sd_scan_scratchpad(tp, ocirs);
9564 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9565 if (!ocirs[i].src_data_length)
9568 size += ocirs[i].src_hdr_length;
9569 size += ocirs[i].src_data_length;
9575 /* Register hwmon sysfs hooks */
9576 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9578 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9582 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9583 if (IS_ERR(tp->hwmon_dev)) {
9584 tp->hwmon_dev = NULL;
9585 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9586 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9592 #define TG3_STAT_ADD32(PSTAT, REG) \
9593 do { u32 __val = tr32(REG); \
9594 (PSTAT)->low += __val; \
9595 if ((PSTAT)->low < __val) \
9596 (PSTAT)->high += 1; \
9599 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9601 struct tg3_hw_stats *sp = tp->hw_stats;
9603 if (!netif_carrier_ok(tp->dev))
9606 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9607 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9608 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9609 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9610 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9611 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9612 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9613 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9614 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9615 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9616 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9617 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9618 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9620 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9621 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9622 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9623 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9624 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9625 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9626 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9627 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9628 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9629 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9630 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9631 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9632 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9633 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9635 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9636 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9637 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9638 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9639 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9641 u32 val = tr32(HOSTCC_FLOW_ATTN);
9642 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9644 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9645 sp->rx_discards.low += val;
9646 if (sp->rx_discards.low < val)
9647 sp->rx_discards.high += 1;
9649 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9651 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9654 static void tg3_chk_missed_msi(struct tg3 *tp)
9658 for (i = 0; i < tp->irq_cnt; i++) {
9659 struct tg3_napi *tnapi = &tp->napi[i];
9661 if (tg3_has_work(tnapi)) {
9662 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9663 tnapi->last_tx_cons == tnapi->tx_cons) {
9664 if (tnapi->chk_msi_cnt < 1) {
9665 tnapi->chk_msi_cnt++;
9671 tnapi->chk_msi_cnt = 0;
9672 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9673 tnapi->last_tx_cons = tnapi->tx_cons;
9677 static void tg3_timer(unsigned long __opaque)
9679 struct tg3 *tp = (struct tg3 *) __opaque;
9681 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9684 spin_lock(&tp->lock);
9686 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9687 tg3_flag(tp, 57765_CLASS))
9688 tg3_chk_missed_msi(tp);
9690 if (!tg3_flag(tp, TAGGED_STATUS)) {
9691 /* All of this garbage is because when using non-tagged
9692 * IRQ status the mailbox/status_block protocol the chip
9693 * uses with the cpu is race prone.
9695 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9696 tw32(GRC_LOCAL_CTRL,
9697 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9699 tw32(HOSTCC_MODE, tp->coalesce_mode |
9700 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9703 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9704 spin_unlock(&tp->lock);
9705 tg3_reset_task_schedule(tp);
9710 /* This part only runs once per second. */
9711 if (!--tp->timer_counter) {
9712 if (tg3_flag(tp, 5705_PLUS))
9713 tg3_periodic_fetch_stats(tp);
9715 if (tp->setlpicnt && !--tp->setlpicnt)
9716 tg3_phy_eee_enable(tp);
9718 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9722 mac_stat = tr32(MAC_STATUS);
9725 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9726 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9728 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9732 tg3_setup_phy(tp, 0);
9733 } else if (tg3_flag(tp, POLL_SERDES)) {
9734 u32 mac_stat = tr32(MAC_STATUS);
9737 if (netif_carrier_ok(tp->dev) &&
9738 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9741 if (!netif_carrier_ok(tp->dev) &&
9742 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9743 MAC_STATUS_SIGNAL_DET))) {
9747 if (!tp->serdes_counter) {
9750 ~MAC_MODE_PORT_MODE_MASK));
9752 tw32_f(MAC_MODE, tp->mac_mode);
9755 tg3_setup_phy(tp, 0);
9757 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9758 tg3_flag(tp, 5780_CLASS)) {
9759 tg3_serdes_parallel_detect(tp);
9762 tp->timer_counter = tp->timer_multiplier;
9765 /* Heartbeat is only sent once every 2 seconds.
9767 * The heartbeat is to tell the ASF firmware that the host
9768 * driver is still alive. In the event that the OS crashes,
9769 * ASF needs to reset the hardware to free up the FIFO space
9770 * that may be filled with rx packets destined for the host.
9771 * If the FIFO is full, ASF will no longer function properly.
9773 * Unintended resets have been reported on real time kernels
9774 * where the timer doesn't run on time. Netpoll will also have
9777 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9778 * to check the ring condition when the heartbeat is expiring
9779 * before doing the reset. This will prevent most unintended
9782 if (!--tp->asf_counter) {
9783 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9784 tg3_wait_for_event_ack(tp);
9786 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9787 FWCMD_NICDRV_ALIVE3);
9788 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9789 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9790 TG3_FW_UPDATE_TIMEOUT_SEC);
9792 tg3_generate_fw_event(tp);
9794 tp->asf_counter = tp->asf_multiplier;
9797 spin_unlock(&tp->lock);
9800 tp->timer.expires = jiffies + tp->timer_offset;
9801 add_timer(&tp->timer);
9804 static void __devinit tg3_timer_init(struct tg3 *tp)
9806 if (tg3_flag(tp, TAGGED_STATUS) &&
9807 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9808 !tg3_flag(tp, 57765_CLASS))
9809 tp->timer_offset = HZ;
9811 tp->timer_offset = HZ / 10;
9813 BUG_ON(tp->timer_offset > HZ);
9815 tp->timer_multiplier = (HZ / tp->timer_offset);
9816 tp->asf_multiplier = (HZ / tp->timer_offset) *
9817 TG3_FW_UPDATE_FREQ_SEC;
9819 init_timer(&tp->timer);
9820 tp->timer.data = (unsigned long) tp;
9821 tp->timer.function = tg3_timer;
9824 static void tg3_timer_start(struct tg3 *tp)
9826 tp->asf_counter = tp->asf_multiplier;
9827 tp->timer_counter = tp->timer_multiplier;
9829 tp->timer.expires = jiffies + tp->timer_offset;
9830 add_timer(&tp->timer);
9833 static void tg3_timer_stop(struct tg3 *tp)
9835 del_timer_sync(&tp->timer);
9838 /* Restart hardware after configuration changes, self-test, etc.
9839 * Invoked with tp->lock held.
9841 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9842 __releases(tp->lock)
9843 __acquires(tp->lock)
9847 err = tg3_init_hw(tp, reset_phy);
9850 "Failed to re-initialize device, aborting\n");
9851 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9852 tg3_full_unlock(tp);
9855 tg3_napi_enable(tp);
9857 tg3_full_lock(tp, 0);
9862 static void tg3_reset_task(struct work_struct *work)
9864 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9867 tg3_full_lock(tp, 0);
9869 if (!netif_running(tp->dev)) {
9870 tg3_flag_clear(tp, RESET_TASK_PENDING);
9871 tg3_full_unlock(tp);
9875 tg3_full_unlock(tp);
9881 tg3_full_lock(tp, 1);
9883 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9884 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9885 tp->write32_rx_mbox = tg3_write_flush_reg32;
9886 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9887 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9890 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9891 err = tg3_init_hw(tp, 1);
9895 tg3_netif_start(tp);
9898 tg3_full_unlock(tp);
9903 tg3_flag_clear(tp, RESET_TASK_PENDING);
9906 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9909 unsigned long flags;
9911 struct tg3_napi *tnapi = &tp->napi[irq_num];
9913 if (tp->irq_cnt == 1)
9914 name = tp->dev->name;
9916 name = &tnapi->irq_lbl[0];
9917 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9918 name[IFNAMSIZ-1] = 0;
9921 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9923 if (tg3_flag(tp, 1SHOT_MSI))
9928 if (tg3_flag(tp, TAGGED_STATUS))
9929 fn = tg3_interrupt_tagged;
9930 flags = IRQF_SHARED;
9933 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9936 static int tg3_test_interrupt(struct tg3 *tp)
9938 struct tg3_napi *tnapi = &tp->napi[0];
9939 struct net_device *dev = tp->dev;
9940 int err, i, intr_ok = 0;
9943 if (!netif_running(dev))
9946 tg3_disable_ints(tp);
9948 free_irq(tnapi->irq_vec, tnapi);
9951 * Turn off MSI one shot mode. Otherwise this test has no
9952 * observable way to know whether the interrupt was delivered.
9954 if (tg3_flag(tp, 57765_PLUS)) {
9955 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9956 tw32(MSGINT_MODE, val);
9959 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9960 IRQF_SHARED, dev->name, tnapi);
9964 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9965 tg3_enable_ints(tp);
9967 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9970 for (i = 0; i < 5; i++) {
9971 u32 int_mbox, misc_host_ctrl;
9973 int_mbox = tr32_mailbox(tnapi->int_mbox);
9974 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9976 if ((int_mbox != 0) ||
9977 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9982 if (tg3_flag(tp, 57765_PLUS) &&
9983 tnapi->hw_status->status_tag != tnapi->last_tag)
9984 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9989 tg3_disable_ints(tp);
9991 free_irq(tnapi->irq_vec, tnapi);
9993 err = tg3_request_irq(tp, 0);
9999 /* Reenable MSI one shot mode. */
10000 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10001 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10002 tw32(MSGINT_MODE, val);
10010 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
10011 * successfully restored
10013 static int tg3_test_msi(struct tg3 *tp)
10018 if (!tg3_flag(tp, USING_MSI))
10021 /* Turn off SERR reporting in case MSI terminates with Master
10024 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10025 pci_write_config_word(tp->pdev, PCI_COMMAND,
10026 pci_cmd & ~PCI_COMMAND_SERR);
10028 err = tg3_test_interrupt(tp);
10030 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10035 /* other failures */
10039 /* MSI test failed, go back to INTx mode */
10040 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10041 "to INTx mode. Please report this failure to the PCI "
10042 "maintainer and include system chipset information\n");
10044 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10046 pci_disable_msi(tp->pdev);
10048 tg3_flag_clear(tp, USING_MSI);
10049 tp->napi[0].irq_vec = tp->pdev->irq;
10051 err = tg3_request_irq(tp, 0);
10055 /* Need to reset the chip because the MSI cycle may have terminated
10056 * with Master Abort.
10058 tg3_full_lock(tp, 1);
10060 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10061 err = tg3_init_hw(tp, 1);
10063 tg3_full_unlock(tp);
10066 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10071 static int tg3_request_firmware(struct tg3 *tp)
10073 const __be32 *fw_data;
10075 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10076 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10081 fw_data = (void *)tp->fw->data;
10083 /* Firmware blob starts with version numbers, followed by
10084 * start address and _full_ length including BSS sections
10085 * (which must be longer than the actual data, of course
10088 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
10089 if (tp->fw_len < (tp->fw->size - 12)) {
10090 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10091 tp->fw_len, tp->fw_needed);
10092 release_firmware(tp->fw);
10097 /* We no longer need firmware; we have it. */
10098 tp->fw_needed = NULL;
10102 static bool tg3_enable_msix(struct tg3 *tp)
10105 struct msix_entry msix_ent[tp->irq_max];
10107 tp->irq_cnt = netif_get_num_default_rss_queues();
10108 if (tp->irq_cnt > 1) {
10109 /* We want as many rx rings enabled as there are cpus.
10110 * In multiqueue MSI-X mode, the first MSI-X vector
10111 * only deals with link interrupts, etc, so we add
10112 * one to the number of vectors we are requesting.
10114 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
10117 for (i = 0; i < tp->irq_max; i++) {
10118 msix_ent[i].entry = i;
10119 msix_ent[i].vector = 0;
10122 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10125 } else if (rc != 0) {
10126 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10128 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10133 for (i = 0; i < tp->irq_max; i++)
10134 tp->napi[i].irq_vec = msix_ent[i].vector;
10136 netif_set_real_num_tx_queues(tp->dev, 1);
10137 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
10138 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
10139 pci_disable_msix(tp->pdev);
10143 if (tp->irq_cnt > 1) {
10144 tg3_flag_set(tp, ENABLE_RSS);
10146 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
10147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
10148 tg3_flag_set(tp, ENABLE_TSS);
10149 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
10156 static void tg3_ints_init(struct tg3 *tp)
10158 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10159 !tg3_flag(tp, TAGGED_STATUS)) {
10160 /* All MSI supporting chips should support tagged
10161 * status. Assert that this is the case.
10163 netdev_warn(tp->dev,
10164 "MSI without TAGGED_STATUS? Not using MSI\n");
10168 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10169 tg3_flag_set(tp, USING_MSIX);
10170 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10171 tg3_flag_set(tp, USING_MSI);
10173 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10174 u32 msi_mode = tr32(MSGINT_MODE);
10175 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10176 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10177 if (!tg3_flag(tp, 1SHOT_MSI))
10178 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10179 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10182 if (!tg3_flag(tp, USING_MSIX)) {
10184 tp->napi[0].irq_vec = tp->pdev->irq;
10185 netif_set_real_num_tx_queues(tp->dev, 1);
10186 netif_set_real_num_rx_queues(tp->dev, 1);
10190 static void tg3_ints_fini(struct tg3 *tp)
10192 if (tg3_flag(tp, USING_MSIX))
10193 pci_disable_msix(tp->pdev);
10194 else if (tg3_flag(tp, USING_MSI))
10195 pci_disable_msi(tp->pdev);
10196 tg3_flag_clear(tp, USING_MSI);
10197 tg3_flag_clear(tp, USING_MSIX);
10198 tg3_flag_clear(tp, ENABLE_RSS);
10199 tg3_flag_clear(tp, ENABLE_TSS);
10202 static int tg3_open(struct net_device *dev)
10204 struct tg3 *tp = netdev_priv(dev);
10207 if (tp->fw_needed) {
10208 err = tg3_request_firmware(tp);
10209 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10213 netdev_warn(tp->dev, "TSO capability disabled\n");
10214 tg3_flag_clear(tp, TSO_CAPABLE);
10215 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10216 netdev_notice(tp->dev, "TSO capability restored\n");
10217 tg3_flag_set(tp, TSO_CAPABLE);
10221 netif_carrier_off(tp->dev);
10223 err = tg3_power_up(tp);
10227 tg3_full_lock(tp, 0);
10229 tg3_disable_ints(tp);
10230 tg3_flag_clear(tp, INIT_COMPLETE);
10232 tg3_full_unlock(tp);
10235 * Setup interrupts first so we know how
10236 * many NAPI resources to allocate
10240 tg3_rss_check_indir_tbl(tp);
10242 /* The placement of this call is tied
10243 * to the setup and use of Host TX descriptors.
10245 err = tg3_alloc_consistent(tp);
10251 tg3_napi_enable(tp);
10253 for (i = 0; i < tp->irq_cnt; i++) {
10254 struct tg3_napi *tnapi = &tp->napi[i];
10255 err = tg3_request_irq(tp, i);
10257 for (i--; i >= 0; i--) {
10258 tnapi = &tp->napi[i];
10259 free_irq(tnapi->irq_vec, tnapi);
10265 tg3_full_lock(tp, 0);
10267 err = tg3_init_hw(tp, 1);
10269 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10270 tg3_free_rings(tp);
10273 tg3_full_unlock(tp);
10278 if (tg3_flag(tp, USING_MSI)) {
10279 err = tg3_test_msi(tp);
10282 tg3_full_lock(tp, 0);
10283 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10284 tg3_free_rings(tp);
10285 tg3_full_unlock(tp);
10290 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10291 u32 val = tr32(PCIE_TRANSACTION_CFG);
10293 tw32(PCIE_TRANSACTION_CFG,
10294 val | PCIE_TRANS_CFG_1SHOT_MSI);
10300 tg3_hwmon_open(tp);
10302 tg3_full_lock(tp, 0);
10304 tg3_timer_start(tp);
10305 tg3_flag_set(tp, INIT_COMPLETE);
10306 tg3_enable_ints(tp);
10308 tg3_full_unlock(tp);
10310 netif_tx_start_all_queues(dev);
10313 * Reset loopback feature if it was turned on while the device was down
10314 * make sure that it's installed properly now.
10316 if (dev->features & NETIF_F_LOOPBACK)
10317 tg3_set_loopback(dev, dev->features);
10322 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10323 struct tg3_napi *tnapi = &tp->napi[i];
10324 free_irq(tnapi->irq_vec, tnapi);
10328 tg3_napi_disable(tp);
10330 tg3_free_consistent(tp);
10334 tg3_frob_aux_power(tp, false);
10335 pci_set_power_state(tp->pdev, PCI_D3hot);
10339 static int tg3_close(struct net_device *dev)
10342 struct tg3 *tp = netdev_priv(dev);
10344 tg3_napi_disable(tp);
10345 tg3_reset_task_cancel(tp);
10347 netif_tx_stop_all_queues(dev);
10349 tg3_timer_stop(tp);
10351 tg3_hwmon_close(tp);
10355 tg3_full_lock(tp, 1);
10357 tg3_disable_ints(tp);
10359 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10360 tg3_free_rings(tp);
10361 tg3_flag_clear(tp, INIT_COMPLETE);
10363 tg3_full_unlock(tp);
10365 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10366 struct tg3_napi *tnapi = &tp->napi[i];
10367 free_irq(tnapi->irq_vec, tnapi);
10372 /* Clear stats across close / open calls */
10373 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10374 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10378 tg3_free_consistent(tp);
10380 tg3_power_down(tp);
10382 netif_carrier_off(tp->dev);
10387 static inline u64 get_stat64(tg3_stat64_t *val)
10389 return ((u64)val->high << 32) | ((u64)val->low);
10392 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10394 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10396 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10397 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10401 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10402 tg3_writephy(tp, MII_TG3_TEST1,
10403 val | MII_TG3_TEST1_CRC_EN);
10404 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10408 tp->phy_crc_errors += val;
10410 return tp->phy_crc_errors;
10413 return get_stat64(&hw_stats->rx_fcs_errors);
10416 #define ESTAT_ADD(member) \
10417 estats->member = old_estats->member + \
10418 get_stat64(&hw_stats->member)
10420 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10422 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10423 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10425 ESTAT_ADD(rx_octets);
10426 ESTAT_ADD(rx_fragments);
10427 ESTAT_ADD(rx_ucast_packets);
10428 ESTAT_ADD(rx_mcast_packets);
10429 ESTAT_ADD(rx_bcast_packets);
10430 ESTAT_ADD(rx_fcs_errors);
10431 ESTAT_ADD(rx_align_errors);
10432 ESTAT_ADD(rx_xon_pause_rcvd);
10433 ESTAT_ADD(rx_xoff_pause_rcvd);
10434 ESTAT_ADD(rx_mac_ctrl_rcvd);
10435 ESTAT_ADD(rx_xoff_entered);
10436 ESTAT_ADD(rx_frame_too_long_errors);
10437 ESTAT_ADD(rx_jabbers);
10438 ESTAT_ADD(rx_undersize_packets);
10439 ESTAT_ADD(rx_in_length_errors);
10440 ESTAT_ADD(rx_out_length_errors);
10441 ESTAT_ADD(rx_64_or_less_octet_packets);
10442 ESTAT_ADD(rx_65_to_127_octet_packets);
10443 ESTAT_ADD(rx_128_to_255_octet_packets);
10444 ESTAT_ADD(rx_256_to_511_octet_packets);
10445 ESTAT_ADD(rx_512_to_1023_octet_packets);
10446 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10447 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10448 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10449 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10450 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10452 ESTAT_ADD(tx_octets);
10453 ESTAT_ADD(tx_collisions);
10454 ESTAT_ADD(tx_xon_sent);
10455 ESTAT_ADD(tx_xoff_sent);
10456 ESTAT_ADD(tx_flow_control);
10457 ESTAT_ADD(tx_mac_errors);
10458 ESTAT_ADD(tx_single_collisions);
10459 ESTAT_ADD(tx_mult_collisions);
10460 ESTAT_ADD(tx_deferred);
10461 ESTAT_ADD(tx_excessive_collisions);
10462 ESTAT_ADD(tx_late_collisions);
10463 ESTAT_ADD(tx_collide_2times);
10464 ESTAT_ADD(tx_collide_3times);
10465 ESTAT_ADD(tx_collide_4times);
10466 ESTAT_ADD(tx_collide_5times);
10467 ESTAT_ADD(tx_collide_6times);
10468 ESTAT_ADD(tx_collide_7times);
10469 ESTAT_ADD(tx_collide_8times);
10470 ESTAT_ADD(tx_collide_9times);
10471 ESTAT_ADD(tx_collide_10times);
10472 ESTAT_ADD(tx_collide_11times);
10473 ESTAT_ADD(tx_collide_12times);
10474 ESTAT_ADD(tx_collide_13times);
10475 ESTAT_ADD(tx_collide_14times);
10476 ESTAT_ADD(tx_collide_15times);
10477 ESTAT_ADD(tx_ucast_packets);
10478 ESTAT_ADD(tx_mcast_packets);
10479 ESTAT_ADD(tx_bcast_packets);
10480 ESTAT_ADD(tx_carrier_sense_errors);
10481 ESTAT_ADD(tx_discards);
10482 ESTAT_ADD(tx_errors);
10484 ESTAT_ADD(dma_writeq_full);
10485 ESTAT_ADD(dma_write_prioq_full);
10486 ESTAT_ADD(rxbds_empty);
10487 ESTAT_ADD(rx_discards);
10488 ESTAT_ADD(rx_errors);
10489 ESTAT_ADD(rx_threshold_hit);
10491 ESTAT_ADD(dma_readq_full);
10492 ESTAT_ADD(dma_read_prioq_full);
10493 ESTAT_ADD(tx_comp_queue_full);
10495 ESTAT_ADD(ring_set_send_prod_index);
10496 ESTAT_ADD(ring_status_update);
10497 ESTAT_ADD(nic_irqs);
10498 ESTAT_ADD(nic_avoided_irqs);
10499 ESTAT_ADD(nic_tx_threshold_hit);
10501 ESTAT_ADD(mbuf_lwm_thresh_hit);
10504 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10506 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10507 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10509 stats->rx_packets = old_stats->rx_packets +
10510 get_stat64(&hw_stats->rx_ucast_packets) +
10511 get_stat64(&hw_stats->rx_mcast_packets) +
10512 get_stat64(&hw_stats->rx_bcast_packets);
10514 stats->tx_packets = old_stats->tx_packets +
10515 get_stat64(&hw_stats->tx_ucast_packets) +
10516 get_stat64(&hw_stats->tx_mcast_packets) +
10517 get_stat64(&hw_stats->tx_bcast_packets);
10519 stats->rx_bytes = old_stats->rx_bytes +
10520 get_stat64(&hw_stats->rx_octets);
10521 stats->tx_bytes = old_stats->tx_bytes +
10522 get_stat64(&hw_stats->tx_octets);
10524 stats->rx_errors = old_stats->rx_errors +
10525 get_stat64(&hw_stats->rx_errors);
10526 stats->tx_errors = old_stats->tx_errors +
10527 get_stat64(&hw_stats->tx_errors) +
10528 get_stat64(&hw_stats->tx_mac_errors) +
10529 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10530 get_stat64(&hw_stats->tx_discards);
10532 stats->multicast = old_stats->multicast +
10533 get_stat64(&hw_stats->rx_mcast_packets);
10534 stats->collisions = old_stats->collisions +
10535 get_stat64(&hw_stats->tx_collisions);
10537 stats->rx_length_errors = old_stats->rx_length_errors +
10538 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10539 get_stat64(&hw_stats->rx_undersize_packets);
10541 stats->rx_over_errors = old_stats->rx_over_errors +
10542 get_stat64(&hw_stats->rxbds_empty);
10543 stats->rx_frame_errors = old_stats->rx_frame_errors +
10544 get_stat64(&hw_stats->rx_align_errors);
10545 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10546 get_stat64(&hw_stats->tx_discards);
10547 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10548 get_stat64(&hw_stats->tx_carrier_sense_errors);
10550 stats->rx_crc_errors = old_stats->rx_crc_errors +
10551 tg3_calc_crc_errors(tp);
10553 stats->rx_missed_errors = old_stats->rx_missed_errors +
10554 get_stat64(&hw_stats->rx_discards);
10556 stats->rx_dropped = tp->rx_dropped;
10557 stats->tx_dropped = tp->tx_dropped;
10560 static int tg3_get_regs_len(struct net_device *dev)
10562 return TG3_REG_BLK_SIZE;
10565 static void tg3_get_regs(struct net_device *dev,
10566 struct ethtool_regs *regs, void *_p)
10568 struct tg3 *tp = netdev_priv(dev);
10572 memset(_p, 0, TG3_REG_BLK_SIZE);
10574 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10577 tg3_full_lock(tp, 0);
10579 tg3_dump_legacy_regs(tp, (u32 *)_p);
10581 tg3_full_unlock(tp);
10584 static int tg3_get_eeprom_len(struct net_device *dev)
10586 struct tg3 *tp = netdev_priv(dev);
10588 return tp->nvram_size;
10591 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10593 struct tg3 *tp = netdev_priv(dev);
10596 u32 i, offset, len, b_offset, b_count;
10599 if (tg3_flag(tp, NO_NVRAM))
10602 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10605 offset = eeprom->offset;
10609 eeprom->magic = TG3_EEPROM_MAGIC;
10612 /* adjustments to start on required 4 byte boundary */
10613 b_offset = offset & 3;
10614 b_count = 4 - b_offset;
10615 if (b_count > len) {
10616 /* i.e. offset=1 len=2 */
10619 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10622 memcpy(data, ((char *)&val) + b_offset, b_count);
10625 eeprom->len += b_count;
10628 /* read bytes up to the last 4 byte boundary */
10629 pd = &data[eeprom->len];
10630 for (i = 0; i < (len - (len & 3)); i += 4) {
10631 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10636 memcpy(pd + i, &val, 4);
10641 /* read last bytes not ending on 4 byte boundary */
10642 pd = &data[eeprom->len];
10644 b_offset = offset + len - b_count;
10645 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10648 memcpy(pd, &val, b_count);
10649 eeprom->len += b_count;
10654 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10656 struct tg3 *tp = netdev_priv(dev);
10658 u32 offset, len, b_offset, odd_len;
10662 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10665 if (tg3_flag(tp, NO_NVRAM) ||
10666 eeprom->magic != TG3_EEPROM_MAGIC)
10669 offset = eeprom->offset;
10672 if ((b_offset = (offset & 3))) {
10673 /* adjustments to start on required 4 byte boundary */
10674 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10685 /* adjustments to end on required 4 byte boundary */
10687 len = (len + 3) & ~3;
10688 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10694 if (b_offset || odd_len) {
10695 buf = kmalloc(len, GFP_KERNEL);
10699 memcpy(buf, &start, 4);
10701 memcpy(buf+len-4, &end, 4);
10702 memcpy(buf + b_offset, data, eeprom->len);
10705 ret = tg3_nvram_write_block(tp, offset, len, buf);
10713 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10715 struct tg3 *tp = netdev_priv(dev);
10717 if (tg3_flag(tp, USE_PHYLIB)) {
10718 struct phy_device *phydev;
10719 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10721 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10722 return phy_ethtool_gset(phydev, cmd);
10725 cmd->supported = (SUPPORTED_Autoneg);
10727 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10728 cmd->supported |= (SUPPORTED_1000baseT_Half |
10729 SUPPORTED_1000baseT_Full);
10731 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10732 cmd->supported |= (SUPPORTED_100baseT_Half |
10733 SUPPORTED_100baseT_Full |
10734 SUPPORTED_10baseT_Half |
10735 SUPPORTED_10baseT_Full |
10737 cmd->port = PORT_TP;
10739 cmd->supported |= SUPPORTED_FIBRE;
10740 cmd->port = PORT_FIBRE;
10743 cmd->advertising = tp->link_config.advertising;
10744 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10745 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10746 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10747 cmd->advertising |= ADVERTISED_Pause;
10749 cmd->advertising |= ADVERTISED_Pause |
10750 ADVERTISED_Asym_Pause;
10752 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10753 cmd->advertising |= ADVERTISED_Asym_Pause;
10756 if (netif_running(dev) && netif_carrier_ok(dev)) {
10757 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10758 cmd->duplex = tp->link_config.active_duplex;
10759 cmd->lp_advertising = tp->link_config.rmt_adv;
10760 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10761 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10762 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10764 cmd->eth_tp_mdix = ETH_TP_MDI;
10767 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10768 cmd->duplex = DUPLEX_UNKNOWN;
10769 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10771 cmd->phy_address = tp->phy_addr;
10772 cmd->transceiver = XCVR_INTERNAL;
10773 cmd->autoneg = tp->link_config.autoneg;
10779 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10781 struct tg3 *tp = netdev_priv(dev);
10782 u32 speed = ethtool_cmd_speed(cmd);
10784 if (tg3_flag(tp, USE_PHYLIB)) {
10785 struct phy_device *phydev;
10786 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10788 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10789 return phy_ethtool_sset(phydev, cmd);
10792 if (cmd->autoneg != AUTONEG_ENABLE &&
10793 cmd->autoneg != AUTONEG_DISABLE)
10796 if (cmd->autoneg == AUTONEG_DISABLE &&
10797 cmd->duplex != DUPLEX_FULL &&
10798 cmd->duplex != DUPLEX_HALF)
10801 if (cmd->autoneg == AUTONEG_ENABLE) {
10802 u32 mask = ADVERTISED_Autoneg |
10804 ADVERTISED_Asym_Pause;
10806 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10807 mask |= ADVERTISED_1000baseT_Half |
10808 ADVERTISED_1000baseT_Full;
10810 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10811 mask |= ADVERTISED_100baseT_Half |
10812 ADVERTISED_100baseT_Full |
10813 ADVERTISED_10baseT_Half |
10814 ADVERTISED_10baseT_Full |
10817 mask |= ADVERTISED_FIBRE;
10819 if (cmd->advertising & ~mask)
10822 mask &= (ADVERTISED_1000baseT_Half |
10823 ADVERTISED_1000baseT_Full |
10824 ADVERTISED_100baseT_Half |
10825 ADVERTISED_100baseT_Full |
10826 ADVERTISED_10baseT_Half |
10827 ADVERTISED_10baseT_Full);
10829 cmd->advertising &= mask;
10831 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10832 if (speed != SPEED_1000)
10835 if (cmd->duplex != DUPLEX_FULL)
10838 if (speed != SPEED_100 &&
10844 tg3_full_lock(tp, 0);
10846 tp->link_config.autoneg = cmd->autoneg;
10847 if (cmd->autoneg == AUTONEG_ENABLE) {
10848 tp->link_config.advertising = (cmd->advertising |
10849 ADVERTISED_Autoneg);
10850 tp->link_config.speed = SPEED_UNKNOWN;
10851 tp->link_config.duplex = DUPLEX_UNKNOWN;
10853 tp->link_config.advertising = 0;
10854 tp->link_config.speed = speed;
10855 tp->link_config.duplex = cmd->duplex;
10858 if (netif_running(dev))
10859 tg3_setup_phy(tp, 1);
10861 tg3_full_unlock(tp);
10866 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10868 struct tg3 *tp = netdev_priv(dev);
10870 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10871 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10872 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10873 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10876 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10878 struct tg3 *tp = netdev_priv(dev);
10880 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10881 wol->supported = WAKE_MAGIC;
10883 wol->supported = 0;
10885 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10886 wol->wolopts = WAKE_MAGIC;
10887 memset(&wol->sopass, 0, sizeof(wol->sopass));
10890 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10892 struct tg3 *tp = netdev_priv(dev);
10893 struct device *dp = &tp->pdev->dev;
10895 if (wol->wolopts & ~WAKE_MAGIC)
10897 if ((wol->wolopts & WAKE_MAGIC) &&
10898 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10901 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10903 spin_lock_bh(&tp->lock);
10904 if (device_may_wakeup(dp))
10905 tg3_flag_set(tp, WOL_ENABLE);
10907 tg3_flag_clear(tp, WOL_ENABLE);
10908 spin_unlock_bh(&tp->lock);
10913 static u32 tg3_get_msglevel(struct net_device *dev)
10915 struct tg3 *tp = netdev_priv(dev);
10916 return tp->msg_enable;
10919 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10921 struct tg3 *tp = netdev_priv(dev);
10922 tp->msg_enable = value;
10925 static int tg3_nway_reset(struct net_device *dev)
10927 struct tg3 *tp = netdev_priv(dev);
10930 if (!netif_running(dev))
10933 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10936 if (tg3_flag(tp, USE_PHYLIB)) {
10937 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10939 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10943 spin_lock_bh(&tp->lock);
10945 tg3_readphy(tp, MII_BMCR, &bmcr);
10946 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10947 ((bmcr & BMCR_ANENABLE) ||
10948 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10949 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10953 spin_unlock_bh(&tp->lock);
10959 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10961 struct tg3 *tp = netdev_priv(dev);
10963 ering->rx_max_pending = tp->rx_std_ring_mask;
10964 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10965 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10967 ering->rx_jumbo_max_pending = 0;
10969 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10971 ering->rx_pending = tp->rx_pending;
10972 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10973 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10975 ering->rx_jumbo_pending = 0;
10977 ering->tx_pending = tp->napi[0].tx_pending;
10980 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10982 struct tg3 *tp = netdev_priv(dev);
10983 int i, irq_sync = 0, err = 0;
10985 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10986 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10987 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10988 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10989 (tg3_flag(tp, TSO_BUG) &&
10990 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10993 if (netif_running(dev)) {
10995 tg3_netif_stop(tp);
10999 tg3_full_lock(tp, irq_sync);
11001 tp->rx_pending = ering->rx_pending;
11003 if (tg3_flag(tp, MAX_RXPEND_64) &&
11004 tp->rx_pending > 63)
11005 tp->rx_pending = 63;
11006 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11008 for (i = 0; i < tp->irq_max; i++)
11009 tp->napi[i].tx_pending = ering->tx_pending;
11011 if (netif_running(dev)) {
11012 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11013 err = tg3_restart_hw(tp, 1);
11015 tg3_netif_start(tp);
11018 tg3_full_unlock(tp);
11020 if (irq_sync && !err)
11026 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11028 struct tg3 *tp = netdev_priv(dev);
11030 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11032 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11033 epause->rx_pause = 1;
11035 epause->rx_pause = 0;
11037 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11038 epause->tx_pause = 1;
11040 epause->tx_pause = 0;
11043 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11045 struct tg3 *tp = netdev_priv(dev);
11048 if (tg3_flag(tp, USE_PHYLIB)) {
11050 struct phy_device *phydev;
11052 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11054 if (!(phydev->supported & SUPPORTED_Pause) ||
11055 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11056 (epause->rx_pause != epause->tx_pause)))
11059 tp->link_config.flowctrl = 0;
11060 if (epause->rx_pause) {
11061 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11063 if (epause->tx_pause) {
11064 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11065 newadv = ADVERTISED_Pause;
11067 newadv = ADVERTISED_Pause |
11068 ADVERTISED_Asym_Pause;
11069 } else if (epause->tx_pause) {
11070 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11071 newadv = ADVERTISED_Asym_Pause;
11075 if (epause->autoneg)
11076 tg3_flag_set(tp, PAUSE_AUTONEG);
11078 tg3_flag_clear(tp, PAUSE_AUTONEG);
11080 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11081 u32 oldadv = phydev->advertising &
11082 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11083 if (oldadv != newadv) {
11084 phydev->advertising &=
11085 ~(ADVERTISED_Pause |
11086 ADVERTISED_Asym_Pause);
11087 phydev->advertising |= newadv;
11088 if (phydev->autoneg) {
11090 * Always renegotiate the link to
11091 * inform our link partner of our
11092 * flow control settings, even if the
11093 * flow control is forced. Let
11094 * tg3_adjust_link() do the final
11095 * flow control setup.
11097 return phy_start_aneg(phydev);
11101 if (!epause->autoneg)
11102 tg3_setup_flow_control(tp, 0, 0);
11104 tp->link_config.advertising &=
11105 ~(ADVERTISED_Pause |
11106 ADVERTISED_Asym_Pause);
11107 tp->link_config.advertising |= newadv;
11112 if (netif_running(dev)) {
11113 tg3_netif_stop(tp);
11117 tg3_full_lock(tp, irq_sync);
11119 if (epause->autoneg)
11120 tg3_flag_set(tp, PAUSE_AUTONEG);
11122 tg3_flag_clear(tp, PAUSE_AUTONEG);
11123 if (epause->rx_pause)
11124 tp->link_config.flowctrl |= FLOW_CTRL_RX;
11126 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11127 if (epause->tx_pause)
11128 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11130 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11132 if (netif_running(dev)) {
11133 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11134 err = tg3_restart_hw(tp, 1);
11136 tg3_netif_start(tp);
11139 tg3_full_unlock(tp);
11145 static int tg3_get_sset_count(struct net_device *dev, int sset)
11149 return TG3_NUM_TEST;
11151 return TG3_NUM_STATS;
11153 return -EOPNOTSUPP;
11157 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11158 u32 *rules __always_unused)
11160 struct tg3 *tp = netdev_priv(dev);
11162 if (!tg3_flag(tp, SUPPORT_MSIX))
11163 return -EOPNOTSUPP;
11165 switch (info->cmd) {
11166 case ETHTOOL_GRXRINGS:
11167 if (netif_running(tp->dev))
11168 info->data = tp->irq_cnt;
11170 info->data = num_online_cpus();
11171 if (info->data > TG3_IRQ_MAX_VECS_RSS)
11172 info->data = TG3_IRQ_MAX_VECS_RSS;
11175 /* The first interrupt vector only
11176 * handles link interrupts.
11182 return -EOPNOTSUPP;
11186 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11189 struct tg3 *tp = netdev_priv(dev);
11191 if (tg3_flag(tp, SUPPORT_MSIX))
11192 size = TG3_RSS_INDIR_TBL_SIZE;
11197 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11199 struct tg3 *tp = netdev_priv(dev);
11202 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11203 indir[i] = tp->rss_ind_tbl[i];
11208 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11210 struct tg3 *tp = netdev_priv(dev);
11213 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11214 tp->rss_ind_tbl[i] = indir[i];
11216 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11219 /* It is legal to write the indirection
11220 * table while the device is running.
11222 tg3_full_lock(tp, 0);
11223 tg3_rss_write_indir_tbl(tp);
11224 tg3_full_unlock(tp);
11229 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11231 switch (stringset) {
11233 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
11236 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
11239 WARN_ON(1); /* we need a WARN() */
11244 static int tg3_set_phys_id(struct net_device *dev,
11245 enum ethtool_phys_id_state state)
11247 struct tg3 *tp = netdev_priv(dev);
11249 if (!netif_running(tp->dev))
11253 case ETHTOOL_ID_ACTIVE:
11254 return 1; /* cycle on/off once per second */
11256 case ETHTOOL_ID_ON:
11257 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11258 LED_CTRL_1000MBPS_ON |
11259 LED_CTRL_100MBPS_ON |
11260 LED_CTRL_10MBPS_ON |
11261 LED_CTRL_TRAFFIC_OVERRIDE |
11262 LED_CTRL_TRAFFIC_BLINK |
11263 LED_CTRL_TRAFFIC_LED);
11266 case ETHTOOL_ID_OFF:
11267 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11268 LED_CTRL_TRAFFIC_OVERRIDE);
11271 case ETHTOOL_ID_INACTIVE:
11272 tw32(MAC_LED_CTRL, tp->led_ctrl);
11279 static void tg3_get_ethtool_stats(struct net_device *dev,
11280 struct ethtool_stats *estats, u64 *tmp_stats)
11282 struct tg3 *tp = netdev_priv(dev);
11285 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11287 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11290 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11294 u32 offset = 0, len = 0;
11297 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11300 if (magic == TG3_EEPROM_MAGIC) {
11301 for (offset = TG3_NVM_DIR_START;
11302 offset < TG3_NVM_DIR_END;
11303 offset += TG3_NVM_DIRENT_SIZE) {
11304 if (tg3_nvram_read(tp, offset, &val))
11307 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11308 TG3_NVM_DIRTYPE_EXTVPD)
11312 if (offset != TG3_NVM_DIR_END) {
11313 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11314 if (tg3_nvram_read(tp, offset + 4, &offset))
11317 offset = tg3_nvram_logical_addr(tp, offset);
11321 if (!offset || !len) {
11322 offset = TG3_NVM_VPD_OFF;
11323 len = TG3_NVM_VPD_LEN;
11326 buf = kmalloc(len, GFP_KERNEL);
11330 if (magic == TG3_EEPROM_MAGIC) {
11331 for (i = 0; i < len; i += 4) {
11332 /* The data is in little-endian format in NVRAM.
11333 * Use the big-endian read routines to preserve
11334 * the byte order as it exists in NVRAM.
11336 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11342 unsigned int pos = 0;
11344 ptr = (u8 *)&buf[0];
11345 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11346 cnt = pci_read_vpd(tp->pdev, pos,
11348 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11366 #define NVRAM_TEST_SIZE 0x100
11367 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11368 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11369 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11370 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11371 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11372 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11373 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11374 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11376 static int tg3_test_nvram(struct tg3 *tp)
11378 u32 csum, magic, len;
11380 int i, j, k, err = 0, size;
11382 if (tg3_flag(tp, NO_NVRAM))
11385 if (tg3_nvram_read(tp, 0, &magic) != 0)
11388 if (magic == TG3_EEPROM_MAGIC)
11389 size = NVRAM_TEST_SIZE;
11390 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11391 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11392 TG3_EEPROM_SB_FORMAT_1) {
11393 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11394 case TG3_EEPROM_SB_REVISION_0:
11395 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11397 case TG3_EEPROM_SB_REVISION_2:
11398 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11400 case TG3_EEPROM_SB_REVISION_3:
11401 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11403 case TG3_EEPROM_SB_REVISION_4:
11404 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11406 case TG3_EEPROM_SB_REVISION_5:
11407 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11409 case TG3_EEPROM_SB_REVISION_6:
11410 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11417 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11418 size = NVRAM_SELFBOOT_HW_SIZE;
11422 buf = kmalloc(size, GFP_KERNEL);
11427 for (i = 0, j = 0; i < size; i += 4, j++) {
11428 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11435 /* Selfboot format */
11436 magic = be32_to_cpu(buf[0]);
11437 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11438 TG3_EEPROM_MAGIC_FW) {
11439 u8 *buf8 = (u8 *) buf, csum8 = 0;
11441 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11442 TG3_EEPROM_SB_REVISION_2) {
11443 /* For rev 2, the csum doesn't include the MBA. */
11444 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11446 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11449 for (i = 0; i < size; i++)
11462 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11463 TG3_EEPROM_MAGIC_HW) {
11464 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11465 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11466 u8 *buf8 = (u8 *) buf;
11468 /* Separate the parity bits and the data bytes. */
11469 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11470 if ((i == 0) || (i == 8)) {
11474 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11475 parity[k++] = buf8[i] & msk;
11477 } else if (i == 16) {
11481 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11482 parity[k++] = buf8[i] & msk;
11485 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11486 parity[k++] = buf8[i] & msk;
11489 data[j++] = buf8[i];
11493 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11494 u8 hw8 = hweight8(data[i]);
11496 if ((hw8 & 0x1) && parity[i])
11498 else if (!(hw8 & 0x1) && !parity[i])
11507 /* Bootstrap checksum at offset 0x10 */
11508 csum = calc_crc((unsigned char *) buf, 0x10);
11509 if (csum != le32_to_cpu(buf[0x10/4]))
11512 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11513 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11514 if (csum != le32_to_cpu(buf[0xfc/4]))
11519 buf = tg3_vpd_readblock(tp, &len);
11523 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11525 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11529 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11532 i += PCI_VPD_LRDT_TAG_SIZE;
11533 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11534 PCI_VPD_RO_KEYWORD_CHKSUM);
11538 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11540 for (i = 0; i <= j; i++)
11541 csum8 += ((u8 *)buf)[i];
11555 #define TG3_SERDES_TIMEOUT_SEC 2
11556 #define TG3_COPPER_TIMEOUT_SEC 6
11558 static int tg3_test_link(struct tg3 *tp)
11562 if (!netif_running(tp->dev))
11565 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11566 max = TG3_SERDES_TIMEOUT_SEC;
11568 max = TG3_COPPER_TIMEOUT_SEC;
11570 for (i = 0; i < max; i++) {
11571 if (netif_carrier_ok(tp->dev))
11574 if (msleep_interruptible(1000))
11581 /* Only test the commonly used registers */
11582 static int tg3_test_registers(struct tg3 *tp)
11584 int i, is_5705, is_5750;
11585 u32 offset, read_mask, write_mask, val, save_val, read_val;
11589 #define TG3_FL_5705 0x1
11590 #define TG3_FL_NOT_5705 0x2
11591 #define TG3_FL_NOT_5788 0x4
11592 #define TG3_FL_NOT_5750 0x8
11596 /* MAC Control Registers */
11597 { MAC_MODE, TG3_FL_NOT_5705,
11598 0x00000000, 0x00ef6f8c },
11599 { MAC_MODE, TG3_FL_5705,
11600 0x00000000, 0x01ef6b8c },
11601 { MAC_STATUS, TG3_FL_NOT_5705,
11602 0x03800107, 0x00000000 },
11603 { MAC_STATUS, TG3_FL_5705,
11604 0x03800100, 0x00000000 },
11605 { MAC_ADDR_0_HIGH, 0x0000,
11606 0x00000000, 0x0000ffff },
11607 { MAC_ADDR_0_LOW, 0x0000,
11608 0x00000000, 0xffffffff },
11609 { MAC_RX_MTU_SIZE, 0x0000,
11610 0x00000000, 0x0000ffff },
11611 { MAC_TX_MODE, 0x0000,
11612 0x00000000, 0x00000070 },
11613 { MAC_TX_LENGTHS, 0x0000,
11614 0x00000000, 0x00003fff },
11615 { MAC_RX_MODE, TG3_FL_NOT_5705,
11616 0x00000000, 0x000007fc },
11617 { MAC_RX_MODE, TG3_FL_5705,
11618 0x00000000, 0x000007dc },
11619 { MAC_HASH_REG_0, 0x0000,
11620 0x00000000, 0xffffffff },
11621 { MAC_HASH_REG_1, 0x0000,
11622 0x00000000, 0xffffffff },
11623 { MAC_HASH_REG_2, 0x0000,
11624 0x00000000, 0xffffffff },
11625 { MAC_HASH_REG_3, 0x0000,
11626 0x00000000, 0xffffffff },
11628 /* Receive Data and Receive BD Initiator Control Registers. */
11629 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11630 0x00000000, 0xffffffff },
11631 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11632 0x00000000, 0xffffffff },
11633 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11634 0x00000000, 0x00000003 },
11635 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11636 0x00000000, 0xffffffff },
11637 { RCVDBDI_STD_BD+0, 0x0000,
11638 0x00000000, 0xffffffff },
11639 { RCVDBDI_STD_BD+4, 0x0000,
11640 0x00000000, 0xffffffff },
11641 { RCVDBDI_STD_BD+8, 0x0000,
11642 0x00000000, 0xffff0002 },
11643 { RCVDBDI_STD_BD+0xc, 0x0000,
11644 0x00000000, 0xffffffff },
11646 /* Receive BD Initiator Control Registers. */
11647 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11648 0x00000000, 0xffffffff },
11649 { RCVBDI_STD_THRESH, TG3_FL_5705,
11650 0x00000000, 0x000003ff },
11651 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11652 0x00000000, 0xffffffff },
11654 /* Host Coalescing Control Registers. */
11655 { HOSTCC_MODE, TG3_FL_NOT_5705,
11656 0x00000000, 0x00000004 },
11657 { HOSTCC_MODE, TG3_FL_5705,
11658 0x00000000, 0x000000f6 },
11659 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11660 0x00000000, 0xffffffff },
11661 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11662 0x00000000, 0x000003ff },
11663 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11664 0x00000000, 0xffffffff },
11665 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11666 0x00000000, 0x000003ff },
11667 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11668 0x00000000, 0xffffffff },
11669 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11670 0x00000000, 0x000000ff },
11671 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11672 0x00000000, 0xffffffff },
11673 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11674 0x00000000, 0x000000ff },
11675 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11676 0x00000000, 0xffffffff },
11677 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11678 0x00000000, 0xffffffff },
11679 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11680 0x00000000, 0xffffffff },
11681 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11682 0x00000000, 0x000000ff },
11683 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11684 0x00000000, 0xffffffff },
11685 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11686 0x00000000, 0x000000ff },
11687 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11688 0x00000000, 0xffffffff },
11689 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11690 0x00000000, 0xffffffff },
11691 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11692 0x00000000, 0xffffffff },
11693 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11694 0x00000000, 0xffffffff },
11695 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11696 0x00000000, 0xffffffff },
11697 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11698 0xffffffff, 0x00000000 },
11699 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11700 0xffffffff, 0x00000000 },
11702 /* Buffer Manager Control Registers. */
11703 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11704 0x00000000, 0x007fff80 },
11705 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11706 0x00000000, 0x007fffff },
11707 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11708 0x00000000, 0x0000003f },
11709 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11710 0x00000000, 0x000001ff },
11711 { BUFMGR_MB_HIGH_WATER, 0x0000,
11712 0x00000000, 0x000001ff },
11713 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11714 0xffffffff, 0x00000000 },
11715 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11716 0xffffffff, 0x00000000 },
11718 /* Mailbox Registers */
11719 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11720 0x00000000, 0x000001ff },
11721 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11722 0x00000000, 0x000001ff },
11723 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11724 0x00000000, 0x000007ff },
11725 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11726 0x00000000, 0x000001ff },
11728 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11731 is_5705 = is_5750 = 0;
11732 if (tg3_flag(tp, 5705_PLUS)) {
11734 if (tg3_flag(tp, 5750_PLUS))
11738 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11739 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11742 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11745 if (tg3_flag(tp, IS_5788) &&
11746 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11749 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11752 offset = (u32) reg_tbl[i].offset;
11753 read_mask = reg_tbl[i].read_mask;
11754 write_mask = reg_tbl[i].write_mask;
11756 /* Save the original register content */
11757 save_val = tr32(offset);
11759 /* Determine the read-only value. */
11760 read_val = save_val & read_mask;
11762 /* Write zero to the register, then make sure the read-only bits
11763 * are not changed and the read/write bits are all zeros.
11767 val = tr32(offset);
11769 /* Test the read-only and read/write bits. */
11770 if (((val & read_mask) != read_val) || (val & write_mask))
11773 /* Write ones to all the bits defined by RdMask and WrMask, then
11774 * make sure the read-only bits are not changed and the
11775 * read/write bits are all ones.
11777 tw32(offset, read_mask | write_mask);
11779 val = tr32(offset);
11781 /* Test the read-only bits. */
11782 if ((val & read_mask) != read_val)
11785 /* Test the read/write bits. */
11786 if ((val & write_mask) != write_mask)
11789 tw32(offset, save_val);
11795 if (netif_msg_hw(tp))
11796 netdev_err(tp->dev,
11797 "Register test failed at offset %x\n", offset);
11798 tw32(offset, save_val);
11802 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11804 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11808 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11809 for (j = 0; j < len; j += 4) {
11812 tg3_write_mem(tp, offset + j, test_pattern[i]);
11813 tg3_read_mem(tp, offset + j, &val);
11814 if (val != test_pattern[i])
11821 static int tg3_test_memory(struct tg3 *tp)
11823 static struct mem_entry {
11826 } mem_tbl_570x[] = {
11827 { 0x00000000, 0x00b50},
11828 { 0x00002000, 0x1c000},
11829 { 0xffffffff, 0x00000}
11830 }, mem_tbl_5705[] = {
11831 { 0x00000100, 0x0000c},
11832 { 0x00000200, 0x00008},
11833 { 0x00004000, 0x00800},
11834 { 0x00006000, 0x01000},
11835 { 0x00008000, 0x02000},
11836 { 0x00010000, 0x0e000},
11837 { 0xffffffff, 0x00000}
11838 }, mem_tbl_5755[] = {
11839 { 0x00000200, 0x00008},
11840 { 0x00004000, 0x00800},
11841 { 0x00006000, 0x00800},
11842 { 0x00008000, 0x02000},
11843 { 0x00010000, 0x0c000},
11844 { 0xffffffff, 0x00000}
11845 }, mem_tbl_5906[] = {
11846 { 0x00000200, 0x00008},
11847 { 0x00004000, 0x00400},
11848 { 0x00006000, 0x00400},
11849 { 0x00008000, 0x01000},
11850 { 0x00010000, 0x01000},
11851 { 0xffffffff, 0x00000}
11852 }, mem_tbl_5717[] = {
11853 { 0x00000200, 0x00008},
11854 { 0x00010000, 0x0a000},
11855 { 0x00020000, 0x13c00},
11856 { 0xffffffff, 0x00000}
11857 }, mem_tbl_57765[] = {
11858 { 0x00000200, 0x00008},
11859 { 0x00004000, 0x00800},
11860 { 0x00006000, 0x09800},
11861 { 0x00010000, 0x0a000},
11862 { 0xffffffff, 0x00000}
11864 struct mem_entry *mem_tbl;
11868 if (tg3_flag(tp, 5717_PLUS))
11869 mem_tbl = mem_tbl_5717;
11870 else if (tg3_flag(tp, 57765_CLASS))
11871 mem_tbl = mem_tbl_57765;
11872 else if (tg3_flag(tp, 5755_PLUS))
11873 mem_tbl = mem_tbl_5755;
11874 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11875 mem_tbl = mem_tbl_5906;
11876 else if (tg3_flag(tp, 5705_PLUS))
11877 mem_tbl = mem_tbl_5705;
11879 mem_tbl = mem_tbl_570x;
11881 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11882 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11890 #define TG3_TSO_MSS 500
11892 #define TG3_TSO_IP_HDR_LEN 20
11893 #define TG3_TSO_TCP_HDR_LEN 20
11894 #define TG3_TSO_TCP_OPT_LEN 12
11896 static const u8 tg3_tso_header[] = {
11898 0x45, 0x00, 0x00, 0x00,
11899 0x00, 0x00, 0x40, 0x00,
11900 0x40, 0x06, 0x00, 0x00,
11901 0x0a, 0x00, 0x00, 0x01,
11902 0x0a, 0x00, 0x00, 0x02,
11903 0x0d, 0x00, 0xe0, 0x00,
11904 0x00, 0x00, 0x01, 0x00,
11905 0x00, 0x00, 0x02, 0x00,
11906 0x80, 0x10, 0x10, 0x00,
11907 0x14, 0x09, 0x00, 0x00,
11908 0x01, 0x01, 0x08, 0x0a,
11909 0x11, 0x11, 0x11, 0x11,
11910 0x11, 0x11, 0x11, 0x11,
11913 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11915 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11916 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11918 struct sk_buff *skb;
11919 u8 *tx_data, *rx_data;
11921 int num_pkts, tx_len, rx_len, i, err;
11922 struct tg3_rx_buffer_desc *desc;
11923 struct tg3_napi *tnapi, *rnapi;
11924 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11926 tnapi = &tp->napi[0];
11927 rnapi = &tp->napi[0];
11928 if (tp->irq_cnt > 1) {
11929 if (tg3_flag(tp, ENABLE_RSS))
11930 rnapi = &tp->napi[1];
11931 if (tg3_flag(tp, ENABLE_TSS))
11932 tnapi = &tp->napi[1];
11934 coal_now = tnapi->coal_now | rnapi->coal_now;
11939 skb = netdev_alloc_skb(tp->dev, tx_len);
11943 tx_data = skb_put(skb, tx_len);
11944 memcpy(tx_data, tp->dev->dev_addr, 6);
11945 memset(tx_data + 6, 0x0, 8);
11947 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11949 if (tso_loopback) {
11950 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11952 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11953 TG3_TSO_TCP_OPT_LEN;
11955 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11956 sizeof(tg3_tso_header));
11959 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11960 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11962 /* Set the total length field in the IP header */
11963 iph->tot_len = htons((u16)(mss + hdr_len));
11965 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11966 TXD_FLAG_CPU_POST_DMA);
11968 if (tg3_flag(tp, HW_TSO_1) ||
11969 tg3_flag(tp, HW_TSO_2) ||
11970 tg3_flag(tp, HW_TSO_3)) {
11972 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11973 th = (struct tcphdr *)&tx_data[val];
11976 base_flags |= TXD_FLAG_TCPUDP_CSUM;
11978 if (tg3_flag(tp, HW_TSO_3)) {
11979 mss |= (hdr_len & 0xc) << 12;
11980 if (hdr_len & 0x10)
11981 base_flags |= 0x00000010;
11982 base_flags |= (hdr_len & 0x3e0) << 5;
11983 } else if (tg3_flag(tp, HW_TSO_2))
11984 mss |= hdr_len << 9;
11985 else if (tg3_flag(tp, HW_TSO_1) ||
11986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11987 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11989 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11992 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11995 data_off = ETH_HLEN;
11997 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11998 tx_len > VLAN_ETH_FRAME_LEN)
11999 base_flags |= TXD_FLAG_JMB_PKT;
12002 for (i = data_off; i < tx_len; i++)
12003 tx_data[i] = (u8) (i & 0xff);
12005 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12006 if (pci_dma_mapping_error(tp->pdev, map)) {
12007 dev_kfree_skb(skb);
12011 val = tnapi->tx_prod;
12012 tnapi->tx_buffers[val].skb = skb;
12013 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12015 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12020 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12022 budget = tg3_tx_avail(tnapi);
12023 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12024 base_flags | TXD_FLAG_END, mss, 0)) {
12025 tnapi->tx_buffers[val].skb = NULL;
12026 dev_kfree_skb(skb);
12032 /* Sync BD data before updating mailbox */
12035 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12036 tr32_mailbox(tnapi->prodmbox);
12040 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
12041 for (i = 0; i < 35; i++) {
12042 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12047 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12048 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12049 if ((tx_idx == tnapi->tx_prod) &&
12050 (rx_idx == (rx_start_idx + num_pkts)))
12054 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12055 dev_kfree_skb(skb);
12057 if (tx_idx != tnapi->tx_prod)
12060 if (rx_idx != rx_start_idx + num_pkts)
12064 while (rx_idx != rx_start_idx) {
12065 desc = &rnapi->rx_rcb[rx_start_idx++];
12066 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12067 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12069 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12070 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12073 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12076 if (!tso_loopback) {
12077 if (rx_len != tx_len)
12080 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12081 if (opaque_key != RXD_OPAQUE_RING_STD)
12084 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12087 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12088 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12089 >> RXD_TCPCSUM_SHIFT != 0xffff) {
12093 if (opaque_key == RXD_OPAQUE_RING_STD) {
12094 rx_data = tpr->rx_std_buffers[desc_idx].data;
12095 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12097 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12098 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12099 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12104 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12105 PCI_DMA_FROMDEVICE);
12107 rx_data += TG3_RX_OFFSET(tp);
12108 for (i = data_off; i < rx_len; i++, val++) {
12109 if (*(rx_data + i) != (u8) (val & 0xff))
12116 /* tg3_free_rings will unmap and free the rx_data */
12121 #define TG3_STD_LOOPBACK_FAILED 1
12122 #define TG3_JMB_LOOPBACK_FAILED 2
12123 #define TG3_TSO_LOOPBACK_FAILED 4
12124 #define TG3_LOOPBACK_FAILED \
12125 (TG3_STD_LOOPBACK_FAILED | \
12126 TG3_JMB_LOOPBACK_FAILED | \
12127 TG3_TSO_LOOPBACK_FAILED)
12129 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12133 u32 jmb_pkt_sz = 9000;
12136 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12138 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12139 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12141 if (!netif_running(tp->dev)) {
12142 data[0] = TG3_LOOPBACK_FAILED;
12143 data[1] = TG3_LOOPBACK_FAILED;
12145 data[2] = TG3_LOOPBACK_FAILED;
12149 err = tg3_reset_hw(tp, 1);
12151 data[0] = TG3_LOOPBACK_FAILED;
12152 data[1] = TG3_LOOPBACK_FAILED;
12154 data[2] = TG3_LOOPBACK_FAILED;
12158 if (tg3_flag(tp, ENABLE_RSS)) {
12161 /* Reroute all rx packets to the 1st queue */
12162 for (i = MAC_RSS_INDIR_TBL_0;
12163 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12167 /* HW errata - mac loopback fails in some cases on 5780.
12168 * Normal traffic and PHY loopback are not affected by
12169 * errata. Also, the MAC loopback test is deprecated for
12170 * all newer ASIC revisions.
12172 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12173 !tg3_flag(tp, CPMU_PRESENT)) {
12174 tg3_mac_loopback(tp, true);
12176 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12177 data[0] |= TG3_STD_LOOPBACK_FAILED;
12179 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12180 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12181 data[0] |= TG3_JMB_LOOPBACK_FAILED;
12183 tg3_mac_loopback(tp, false);
12186 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12187 !tg3_flag(tp, USE_PHYLIB)) {
12190 tg3_phy_lpbk_set(tp, 0, false);
12192 /* Wait for link */
12193 for (i = 0; i < 100; i++) {
12194 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12199 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12200 data[1] |= TG3_STD_LOOPBACK_FAILED;
12201 if (tg3_flag(tp, TSO_CAPABLE) &&
12202 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12203 data[1] |= TG3_TSO_LOOPBACK_FAILED;
12204 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12205 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12206 data[1] |= TG3_JMB_LOOPBACK_FAILED;
12209 tg3_phy_lpbk_set(tp, 0, true);
12211 /* All link indications report up, but the hardware
12212 * isn't really ready for about 20 msec. Double it
12217 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12218 data[2] |= TG3_STD_LOOPBACK_FAILED;
12219 if (tg3_flag(tp, TSO_CAPABLE) &&
12220 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12221 data[2] |= TG3_TSO_LOOPBACK_FAILED;
12222 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12223 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12224 data[2] |= TG3_JMB_LOOPBACK_FAILED;
12227 /* Re-enable gphy autopowerdown. */
12228 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12229 tg3_phy_toggle_apd(tp, true);
12232 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
12235 tp->phy_flags |= eee_cap;
12240 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12243 struct tg3 *tp = netdev_priv(dev);
12244 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12246 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12247 tg3_power_up(tp)) {
12248 etest->flags |= ETH_TEST_FL_FAILED;
12249 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12253 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12255 if (tg3_test_nvram(tp) != 0) {
12256 etest->flags |= ETH_TEST_FL_FAILED;
12259 if (!doextlpbk && tg3_test_link(tp)) {
12260 etest->flags |= ETH_TEST_FL_FAILED;
12263 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12264 int err, err2 = 0, irq_sync = 0;
12266 if (netif_running(dev)) {
12268 tg3_netif_stop(tp);
12272 tg3_full_lock(tp, irq_sync);
12274 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12275 err = tg3_nvram_lock(tp);
12276 tg3_halt_cpu(tp, RX_CPU_BASE);
12277 if (!tg3_flag(tp, 5705_PLUS))
12278 tg3_halt_cpu(tp, TX_CPU_BASE);
12280 tg3_nvram_unlock(tp);
12282 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12285 if (tg3_test_registers(tp) != 0) {
12286 etest->flags |= ETH_TEST_FL_FAILED;
12290 if (tg3_test_memory(tp) != 0) {
12291 etest->flags |= ETH_TEST_FL_FAILED;
12296 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12298 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12299 etest->flags |= ETH_TEST_FL_FAILED;
12301 tg3_full_unlock(tp);
12303 if (tg3_test_interrupt(tp) != 0) {
12304 etest->flags |= ETH_TEST_FL_FAILED;
12308 tg3_full_lock(tp, 0);
12310 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12311 if (netif_running(dev)) {
12312 tg3_flag_set(tp, INIT_COMPLETE);
12313 err2 = tg3_restart_hw(tp, 1);
12315 tg3_netif_start(tp);
12318 tg3_full_unlock(tp);
12320 if (irq_sync && !err2)
12323 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12324 tg3_power_down(tp);
12328 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12330 struct mii_ioctl_data *data = if_mii(ifr);
12331 struct tg3 *tp = netdev_priv(dev);
12334 if (tg3_flag(tp, USE_PHYLIB)) {
12335 struct phy_device *phydev;
12336 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12338 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12339 return phy_mii_ioctl(phydev, ifr, cmd);
12344 data->phy_id = tp->phy_addr;
12347 case SIOCGMIIREG: {
12350 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12351 break; /* We have no PHY */
12353 if (!netif_running(dev))
12356 spin_lock_bh(&tp->lock);
12357 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12358 spin_unlock_bh(&tp->lock);
12360 data->val_out = mii_regval;
12366 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12367 break; /* We have no PHY */
12369 if (!netif_running(dev))
12372 spin_lock_bh(&tp->lock);
12373 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12374 spin_unlock_bh(&tp->lock);
12382 return -EOPNOTSUPP;
12385 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12387 struct tg3 *tp = netdev_priv(dev);
12389 memcpy(ec, &tp->coal, sizeof(*ec));
12393 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12395 struct tg3 *tp = netdev_priv(dev);
12396 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12397 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12399 if (!tg3_flag(tp, 5705_PLUS)) {
12400 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12401 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12402 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12403 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12406 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12407 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12408 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12409 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12410 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12411 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12412 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12413 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12414 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12415 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12418 /* No rx interrupts will be generated if both are zero */
12419 if ((ec->rx_coalesce_usecs == 0) &&
12420 (ec->rx_max_coalesced_frames == 0))
12423 /* No tx interrupts will be generated if both are zero */
12424 if ((ec->tx_coalesce_usecs == 0) &&
12425 (ec->tx_max_coalesced_frames == 0))
12428 /* Only copy relevant parameters, ignore all others. */
12429 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12430 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12431 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12432 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12433 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12434 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12435 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12436 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12437 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12439 if (netif_running(dev)) {
12440 tg3_full_lock(tp, 0);
12441 __tg3_set_coalesce(tp, &tp->coal);
12442 tg3_full_unlock(tp);
12447 static const struct ethtool_ops tg3_ethtool_ops = {
12448 .get_settings = tg3_get_settings,
12449 .set_settings = tg3_set_settings,
12450 .get_drvinfo = tg3_get_drvinfo,
12451 .get_regs_len = tg3_get_regs_len,
12452 .get_regs = tg3_get_regs,
12453 .get_wol = tg3_get_wol,
12454 .set_wol = tg3_set_wol,
12455 .get_msglevel = tg3_get_msglevel,
12456 .set_msglevel = tg3_set_msglevel,
12457 .nway_reset = tg3_nway_reset,
12458 .get_link = ethtool_op_get_link,
12459 .get_eeprom_len = tg3_get_eeprom_len,
12460 .get_eeprom = tg3_get_eeprom,
12461 .set_eeprom = tg3_set_eeprom,
12462 .get_ringparam = tg3_get_ringparam,
12463 .set_ringparam = tg3_set_ringparam,
12464 .get_pauseparam = tg3_get_pauseparam,
12465 .set_pauseparam = tg3_set_pauseparam,
12466 .self_test = tg3_self_test,
12467 .get_strings = tg3_get_strings,
12468 .set_phys_id = tg3_set_phys_id,
12469 .get_ethtool_stats = tg3_get_ethtool_stats,
12470 .get_coalesce = tg3_get_coalesce,
12471 .set_coalesce = tg3_set_coalesce,
12472 .get_sset_count = tg3_get_sset_count,
12473 .get_rxnfc = tg3_get_rxnfc,
12474 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12475 .get_rxfh_indir = tg3_get_rxfh_indir,
12476 .set_rxfh_indir = tg3_set_rxfh_indir,
12477 .get_ts_info = ethtool_op_get_ts_info,
12480 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12481 struct rtnl_link_stats64 *stats)
12483 struct tg3 *tp = netdev_priv(dev);
12486 return &tp->net_stats_prev;
12488 spin_lock_bh(&tp->lock);
12489 tg3_get_nstats(tp, stats);
12490 spin_unlock_bh(&tp->lock);
12495 static void tg3_set_rx_mode(struct net_device *dev)
12497 struct tg3 *tp = netdev_priv(dev);
12499 if (!netif_running(dev))
12502 tg3_full_lock(tp, 0);
12503 __tg3_set_rx_mode(dev);
12504 tg3_full_unlock(tp);
12507 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12510 dev->mtu = new_mtu;
12512 if (new_mtu > ETH_DATA_LEN) {
12513 if (tg3_flag(tp, 5780_CLASS)) {
12514 netdev_update_features(dev);
12515 tg3_flag_clear(tp, TSO_CAPABLE);
12517 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12520 if (tg3_flag(tp, 5780_CLASS)) {
12521 tg3_flag_set(tp, TSO_CAPABLE);
12522 netdev_update_features(dev);
12524 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12528 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12530 struct tg3 *tp = netdev_priv(dev);
12531 int err, reset_phy = 0;
12533 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12536 if (!netif_running(dev)) {
12537 /* We'll just catch it later when the
12540 tg3_set_mtu(dev, tp, new_mtu);
12546 tg3_netif_stop(tp);
12548 tg3_full_lock(tp, 1);
12550 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12552 tg3_set_mtu(dev, tp, new_mtu);
12554 /* Reset PHY, otherwise the read DMA engine will be in a mode that
12555 * breaks all requests to 256 bytes.
12557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12560 err = tg3_restart_hw(tp, reset_phy);
12563 tg3_netif_start(tp);
12565 tg3_full_unlock(tp);
12573 static const struct net_device_ops tg3_netdev_ops = {
12574 .ndo_open = tg3_open,
12575 .ndo_stop = tg3_close,
12576 .ndo_start_xmit = tg3_start_xmit,
12577 .ndo_get_stats64 = tg3_get_stats64,
12578 .ndo_validate_addr = eth_validate_addr,
12579 .ndo_set_rx_mode = tg3_set_rx_mode,
12580 .ndo_set_mac_address = tg3_set_mac_addr,
12581 .ndo_do_ioctl = tg3_ioctl,
12582 .ndo_tx_timeout = tg3_tx_timeout,
12583 .ndo_change_mtu = tg3_change_mtu,
12584 .ndo_fix_features = tg3_fix_features,
12585 .ndo_set_features = tg3_set_features,
12586 #ifdef CONFIG_NET_POLL_CONTROLLER
12587 .ndo_poll_controller = tg3_poll_controller,
12591 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12593 u32 cursize, val, magic;
12595 tp->nvram_size = EEPROM_CHIP_SIZE;
12597 if (tg3_nvram_read(tp, 0, &magic) != 0)
12600 if ((magic != TG3_EEPROM_MAGIC) &&
12601 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12602 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12606 * Size the chip by reading offsets at increasing powers of two.
12607 * When we encounter our validation signature, we know the addressing
12608 * has wrapped around, and thus have our chip size.
12612 while (cursize < tp->nvram_size) {
12613 if (tg3_nvram_read(tp, cursize, &val) != 0)
12622 tp->nvram_size = cursize;
12625 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12629 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12632 /* Selfboot format */
12633 if (val != TG3_EEPROM_MAGIC) {
12634 tg3_get_eeprom_size(tp);
12638 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12640 /* This is confusing. We want to operate on the
12641 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12642 * call will read from NVRAM and byteswap the data
12643 * according to the byteswapping settings for all
12644 * other register accesses. This ensures the data we
12645 * want will always reside in the lower 16-bits.
12646 * However, the data in NVRAM is in LE format, which
12647 * means the data from the NVRAM read will always be
12648 * opposite the endianness of the CPU. The 16-bit
12649 * byteswap then brings the data to CPU endianness.
12651 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12655 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12658 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12662 nvcfg1 = tr32(NVRAM_CFG1);
12663 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12664 tg3_flag_set(tp, FLASH);
12666 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12667 tw32(NVRAM_CFG1, nvcfg1);
12670 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12671 tg3_flag(tp, 5780_CLASS)) {
12672 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12673 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12674 tp->nvram_jedecnum = JEDEC_ATMEL;
12675 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12676 tg3_flag_set(tp, NVRAM_BUFFERED);
12678 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12679 tp->nvram_jedecnum = JEDEC_ATMEL;
12680 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12682 case FLASH_VENDOR_ATMEL_EEPROM:
12683 tp->nvram_jedecnum = JEDEC_ATMEL;
12684 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12685 tg3_flag_set(tp, NVRAM_BUFFERED);
12687 case FLASH_VENDOR_ST:
12688 tp->nvram_jedecnum = JEDEC_ST;
12689 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12690 tg3_flag_set(tp, NVRAM_BUFFERED);
12692 case FLASH_VENDOR_SAIFUN:
12693 tp->nvram_jedecnum = JEDEC_SAIFUN;
12694 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12696 case FLASH_VENDOR_SST_SMALL:
12697 case FLASH_VENDOR_SST_LARGE:
12698 tp->nvram_jedecnum = JEDEC_SST;
12699 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12703 tp->nvram_jedecnum = JEDEC_ATMEL;
12704 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12705 tg3_flag_set(tp, NVRAM_BUFFERED);
12709 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12711 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12712 case FLASH_5752PAGE_SIZE_256:
12713 tp->nvram_pagesize = 256;
12715 case FLASH_5752PAGE_SIZE_512:
12716 tp->nvram_pagesize = 512;
12718 case FLASH_5752PAGE_SIZE_1K:
12719 tp->nvram_pagesize = 1024;
12721 case FLASH_5752PAGE_SIZE_2K:
12722 tp->nvram_pagesize = 2048;
12724 case FLASH_5752PAGE_SIZE_4K:
12725 tp->nvram_pagesize = 4096;
12727 case FLASH_5752PAGE_SIZE_264:
12728 tp->nvram_pagesize = 264;
12730 case FLASH_5752PAGE_SIZE_528:
12731 tp->nvram_pagesize = 528;
12736 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12740 nvcfg1 = tr32(NVRAM_CFG1);
12742 /* NVRAM protection for TPM */
12743 if (nvcfg1 & (1 << 27))
12744 tg3_flag_set(tp, PROTECTED_NVRAM);
12746 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12747 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12748 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12749 tp->nvram_jedecnum = JEDEC_ATMEL;
12750 tg3_flag_set(tp, NVRAM_BUFFERED);
12752 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12753 tp->nvram_jedecnum = JEDEC_ATMEL;
12754 tg3_flag_set(tp, NVRAM_BUFFERED);
12755 tg3_flag_set(tp, FLASH);
12757 case FLASH_5752VENDOR_ST_M45PE10:
12758 case FLASH_5752VENDOR_ST_M45PE20:
12759 case FLASH_5752VENDOR_ST_M45PE40:
12760 tp->nvram_jedecnum = JEDEC_ST;
12761 tg3_flag_set(tp, NVRAM_BUFFERED);
12762 tg3_flag_set(tp, FLASH);
12766 if (tg3_flag(tp, FLASH)) {
12767 tg3_nvram_get_pagesize(tp, nvcfg1);
12769 /* For eeprom, set pagesize to maximum eeprom size */
12770 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12772 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12773 tw32(NVRAM_CFG1, nvcfg1);
12777 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12779 u32 nvcfg1, protect = 0;
12781 nvcfg1 = tr32(NVRAM_CFG1);
12783 /* NVRAM protection for TPM */
12784 if (nvcfg1 & (1 << 27)) {
12785 tg3_flag_set(tp, PROTECTED_NVRAM);
12789 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12791 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12792 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12793 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12794 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12795 tp->nvram_jedecnum = JEDEC_ATMEL;
12796 tg3_flag_set(tp, NVRAM_BUFFERED);
12797 tg3_flag_set(tp, FLASH);
12798 tp->nvram_pagesize = 264;
12799 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12800 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12801 tp->nvram_size = (protect ? 0x3e200 :
12802 TG3_NVRAM_SIZE_512KB);
12803 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12804 tp->nvram_size = (protect ? 0x1f200 :
12805 TG3_NVRAM_SIZE_256KB);
12807 tp->nvram_size = (protect ? 0x1f200 :
12808 TG3_NVRAM_SIZE_128KB);
12810 case FLASH_5752VENDOR_ST_M45PE10:
12811 case FLASH_5752VENDOR_ST_M45PE20:
12812 case FLASH_5752VENDOR_ST_M45PE40:
12813 tp->nvram_jedecnum = JEDEC_ST;
12814 tg3_flag_set(tp, NVRAM_BUFFERED);
12815 tg3_flag_set(tp, FLASH);
12816 tp->nvram_pagesize = 256;
12817 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12818 tp->nvram_size = (protect ?
12819 TG3_NVRAM_SIZE_64KB :
12820 TG3_NVRAM_SIZE_128KB);
12821 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12822 tp->nvram_size = (protect ?
12823 TG3_NVRAM_SIZE_64KB :
12824 TG3_NVRAM_SIZE_256KB);
12826 tp->nvram_size = (protect ?
12827 TG3_NVRAM_SIZE_128KB :
12828 TG3_NVRAM_SIZE_512KB);
12833 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12837 nvcfg1 = tr32(NVRAM_CFG1);
12839 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12840 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12841 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12842 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12843 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12844 tp->nvram_jedecnum = JEDEC_ATMEL;
12845 tg3_flag_set(tp, NVRAM_BUFFERED);
12846 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12848 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12849 tw32(NVRAM_CFG1, nvcfg1);
12851 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12852 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12853 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12854 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12855 tp->nvram_jedecnum = JEDEC_ATMEL;
12856 tg3_flag_set(tp, NVRAM_BUFFERED);
12857 tg3_flag_set(tp, FLASH);
12858 tp->nvram_pagesize = 264;
12860 case FLASH_5752VENDOR_ST_M45PE10:
12861 case FLASH_5752VENDOR_ST_M45PE20:
12862 case FLASH_5752VENDOR_ST_M45PE40:
12863 tp->nvram_jedecnum = JEDEC_ST;
12864 tg3_flag_set(tp, NVRAM_BUFFERED);
12865 tg3_flag_set(tp, FLASH);
12866 tp->nvram_pagesize = 256;
12871 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12873 u32 nvcfg1, protect = 0;
12875 nvcfg1 = tr32(NVRAM_CFG1);
12877 /* NVRAM protection for TPM */
12878 if (nvcfg1 & (1 << 27)) {
12879 tg3_flag_set(tp, PROTECTED_NVRAM);
12883 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12885 case FLASH_5761VENDOR_ATMEL_ADB021D:
12886 case FLASH_5761VENDOR_ATMEL_ADB041D:
12887 case FLASH_5761VENDOR_ATMEL_ADB081D:
12888 case FLASH_5761VENDOR_ATMEL_ADB161D:
12889 case FLASH_5761VENDOR_ATMEL_MDB021D:
12890 case FLASH_5761VENDOR_ATMEL_MDB041D:
12891 case FLASH_5761VENDOR_ATMEL_MDB081D:
12892 case FLASH_5761VENDOR_ATMEL_MDB161D:
12893 tp->nvram_jedecnum = JEDEC_ATMEL;
12894 tg3_flag_set(tp, NVRAM_BUFFERED);
12895 tg3_flag_set(tp, FLASH);
12896 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12897 tp->nvram_pagesize = 256;
12899 case FLASH_5761VENDOR_ST_A_M45PE20:
12900 case FLASH_5761VENDOR_ST_A_M45PE40:
12901 case FLASH_5761VENDOR_ST_A_M45PE80:
12902 case FLASH_5761VENDOR_ST_A_M45PE16:
12903 case FLASH_5761VENDOR_ST_M_M45PE20:
12904 case FLASH_5761VENDOR_ST_M_M45PE40:
12905 case FLASH_5761VENDOR_ST_M_M45PE80:
12906 case FLASH_5761VENDOR_ST_M_M45PE16:
12907 tp->nvram_jedecnum = JEDEC_ST;
12908 tg3_flag_set(tp, NVRAM_BUFFERED);
12909 tg3_flag_set(tp, FLASH);
12910 tp->nvram_pagesize = 256;
12915 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12918 case FLASH_5761VENDOR_ATMEL_ADB161D:
12919 case FLASH_5761VENDOR_ATMEL_MDB161D:
12920 case FLASH_5761VENDOR_ST_A_M45PE16:
12921 case FLASH_5761VENDOR_ST_M_M45PE16:
12922 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12924 case FLASH_5761VENDOR_ATMEL_ADB081D:
12925 case FLASH_5761VENDOR_ATMEL_MDB081D:
12926 case FLASH_5761VENDOR_ST_A_M45PE80:
12927 case FLASH_5761VENDOR_ST_M_M45PE80:
12928 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12930 case FLASH_5761VENDOR_ATMEL_ADB041D:
12931 case FLASH_5761VENDOR_ATMEL_MDB041D:
12932 case FLASH_5761VENDOR_ST_A_M45PE40:
12933 case FLASH_5761VENDOR_ST_M_M45PE40:
12934 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12936 case FLASH_5761VENDOR_ATMEL_ADB021D:
12937 case FLASH_5761VENDOR_ATMEL_MDB021D:
12938 case FLASH_5761VENDOR_ST_A_M45PE20:
12939 case FLASH_5761VENDOR_ST_M_M45PE20:
12940 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12946 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12948 tp->nvram_jedecnum = JEDEC_ATMEL;
12949 tg3_flag_set(tp, NVRAM_BUFFERED);
12950 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12953 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12957 nvcfg1 = tr32(NVRAM_CFG1);
12959 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12960 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12961 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12962 tp->nvram_jedecnum = JEDEC_ATMEL;
12963 tg3_flag_set(tp, NVRAM_BUFFERED);
12964 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12966 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12967 tw32(NVRAM_CFG1, nvcfg1);
12969 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12970 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12971 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12972 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12973 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12974 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12975 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12976 tp->nvram_jedecnum = JEDEC_ATMEL;
12977 tg3_flag_set(tp, NVRAM_BUFFERED);
12978 tg3_flag_set(tp, FLASH);
12980 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12981 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12982 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12983 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12984 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12986 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12987 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12988 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12990 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12991 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12992 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12996 case FLASH_5752VENDOR_ST_M45PE10:
12997 case FLASH_5752VENDOR_ST_M45PE20:
12998 case FLASH_5752VENDOR_ST_M45PE40:
12999 tp->nvram_jedecnum = JEDEC_ST;
13000 tg3_flag_set(tp, NVRAM_BUFFERED);
13001 tg3_flag_set(tp, FLASH);
13003 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13004 case FLASH_5752VENDOR_ST_M45PE10:
13005 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13007 case FLASH_5752VENDOR_ST_M45PE20:
13008 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13010 case FLASH_5752VENDOR_ST_M45PE40:
13011 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13016 tg3_flag_set(tp, NO_NVRAM);
13020 tg3_nvram_get_pagesize(tp, nvcfg1);
13021 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13022 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13026 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
13030 nvcfg1 = tr32(NVRAM_CFG1);
13032 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13033 case FLASH_5717VENDOR_ATMEL_EEPROM:
13034 case FLASH_5717VENDOR_MICRO_EEPROM:
13035 tp->nvram_jedecnum = JEDEC_ATMEL;
13036 tg3_flag_set(tp, NVRAM_BUFFERED);
13037 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13039 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13040 tw32(NVRAM_CFG1, nvcfg1);
13042 case FLASH_5717VENDOR_ATMEL_MDB011D:
13043 case FLASH_5717VENDOR_ATMEL_ADB011B:
13044 case FLASH_5717VENDOR_ATMEL_ADB011D:
13045 case FLASH_5717VENDOR_ATMEL_MDB021D:
13046 case FLASH_5717VENDOR_ATMEL_ADB021B:
13047 case FLASH_5717VENDOR_ATMEL_ADB021D:
13048 case FLASH_5717VENDOR_ATMEL_45USPT:
13049 tp->nvram_jedecnum = JEDEC_ATMEL;
13050 tg3_flag_set(tp, NVRAM_BUFFERED);
13051 tg3_flag_set(tp, FLASH);
13053 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13054 case FLASH_5717VENDOR_ATMEL_MDB021D:
13055 /* Detect size with tg3_nvram_get_size() */
13057 case FLASH_5717VENDOR_ATMEL_ADB021B:
13058 case FLASH_5717VENDOR_ATMEL_ADB021D:
13059 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13062 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13066 case FLASH_5717VENDOR_ST_M_M25PE10:
13067 case FLASH_5717VENDOR_ST_A_M25PE10:
13068 case FLASH_5717VENDOR_ST_M_M45PE10:
13069 case FLASH_5717VENDOR_ST_A_M45PE10:
13070 case FLASH_5717VENDOR_ST_M_M25PE20:
13071 case FLASH_5717VENDOR_ST_A_M25PE20:
13072 case FLASH_5717VENDOR_ST_M_M45PE20:
13073 case FLASH_5717VENDOR_ST_A_M45PE20:
13074 case FLASH_5717VENDOR_ST_25USPT:
13075 case FLASH_5717VENDOR_ST_45USPT:
13076 tp->nvram_jedecnum = JEDEC_ST;
13077 tg3_flag_set(tp, NVRAM_BUFFERED);
13078 tg3_flag_set(tp, FLASH);
13080 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13081 case FLASH_5717VENDOR_ST_M_M25PE20:
13082 case FLASH_5717VENDOR_ST_M_M45PE20:
13083 /* Detect size with tg3_nvram_get_size() */
13085 case FLASH_5717VENDOR_ST_A_M25PE20:
13086 case FLASH_5717VENDOR_ST_A_M45PE20:
13087 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13090 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13095 tg3_flag_set(tp, NO_NVRAM);
13099 tg3_nvram_get_pagesize(tp, nvcfg1);
13100 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13101 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13104 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
13106 u32 nvcfg1, nvmpinstrp;
13108 nvcfg1 = tr32(NVRAM_CFG1);
13109 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13111 switch (nvmpinstrp) {
13112 case FLASH_5720_EEPROM_HD:
13113 case FLASH_5720_EEPROM_LD:
13114 tp->nvram_jedecnum = JEDEC_ATMEL;
13115 tg3_flag_set(tp, NVRAM_BUFFERED);
13117 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13118 tw32(NVRAM_CFG1, nvcfg1);
13119 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13120 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13122 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13124 case FLASH_5720VENDOR_M_ATMEL_DB011D:
13125 case FLASH_5720VENDOR_A_ATMEL_DB011B:
13126 case FLASH_5720VENDOR_A_ATMEL_DB011D:
13127 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13128 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13129 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13130 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13131 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13132 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13133 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13134 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13135 case FLASH_5720VENDOR_ATMEL_45USPT:
13136 tp->nvram_jedecnum = JEDEC_ATMEL;
13137 tg3_flag_set(tp, NVRAM_BUFFERED);
13138 tg3_flag_set(tp, FLASH);
13140 switch (nvmpinstrp) {
13141 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13142 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13143 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13144 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13146 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13147 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13148 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13149 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13151 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13152 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13153 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13156 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13160 case FLASH_5720VENDOR_M_ST_M25PE10:
13161 case FLASH_5720VENDOR_M_ST_M45PE10:
13162 case FLASH_5720VENDOR_A_ST_M25PE10:
13163 case FLASH_5720VENDOR_A_ST_M45PE10:
13164 case FLASH_5720VENDOR_M_ST_M25PE20:
13165 case FLASH_5720VENDOR_M_ST_M45PE20:
13166 case FLASH_5720VENDOR_A_ST_M25PE20:
13167 case FLASH_5720VENDOR_A_ST_M45PE20:
13168 case FLASH_5720VENDOR_M_ST_M25PE40:
13169 case FLASH_5720VENDOR_M_ST_M45PE40:
13170 case FLASH_5720VENDOR_A_ST_M25PE40:
13171 case FLASH_5720VENDOR_A_ST_M45PE40:
13172 case FLASH_5720VENDOR_M_ST_M25PE80:
13173 case FLASH_5720VENDOR_M_ST_M45PE80:
13174 case FLASH_5720VENDOR_A_ST_M25PE80:
13175 case FLASH_5720VENDOR_A_ST_M45PE80:
13176 case FLASH_5720VENDOR_ST_25USPT:
13177 case FLASH_5720VENDOR_ST_45USPT:
13178 tp->nvram_jedecnum = JEDEC_ST;
13179 tg3_flag_set(tp, NVRAM_BUFFERED);
13180 tg3_flag_set(tp, FLASH);
13182 switch (nvmpinstrp) {
13183 case FLASH_5720VENDOR_M_ST_M25PE20:
13184 case FLASH_5720VENDOR_M_ST_M45PE20:
13185 case FLASH_5720VENDOR_A_ST_M25PE20:
13186 case FLASH_5720VENDOR_A_ST_M45PE20:
13187 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13189 case FLASH_5720VENDOR_M_ST_M25PE40:
13190 case FLASH_5720VENDOR_M_ST_M45PE40:
13191 case FLASH_5720VENDOR_A_ST_M25PE40:
13192 case FLASH_5720VENDOR_A_ST_M45PE40:
13193 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13195 case FLASH_5720VENDOR_M_ST_M25PE80:
13196 case FLASH_5720VENDOR_M_ST_M45PE80:
13197 case FLASH_5720VENDOR_A_ST_M25PE80:
13198 case FLASH_5720VENDOR_A_ST_M45PE80:
13199 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13202 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13207 tg3_flag_set(tp, NO_NVRAM);
13211 tg3_nvram_get_pagesize(tp, nvcfg1);
13212 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13213 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13216 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13217 static void __devinit tg3_nvram_init(struct tg3 *tp)
13219 tw32_f(GRC_EEPROM_ADDR,
13220 (EEPROM_ADDR_FSM_RESET |
13221 (EEPROM_DEFAULT_CLOCK_PERIOD <<
13222 EEPROM_ADDR_CLKPERD_SHIFT)));
13226 /* Enable seeprom accesses. */
13227 tw32_f(GRC_LOCAL_CTRL,
13228 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13231 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13232 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13233 tg3_flag_set(tp, NVRAM);
13235 if (tg3_nvram_lock(tp)) {
13236 netdev_warn(tp->dev,
13237 "Cannot get nvram lock, %s failed\n",
13241 tg3_enable_nvram_access(tp);
13243 tp->nvram_size = 0;
13245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13246 tg3_get_5752_nvram_info(tp);
13247 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13248 tg3_get_5755_nvram_info(tp);
13249 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13251 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13252 tg3_get_5787_nvram_info(tp);
13253 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13254 tg3_get_5761_nvram_info(tp);
13255 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13256 tg3_get_5906_nvram_info(tp);
13257 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13258 tg3_flag(tp, 57765_CLASS))
13259 tg3_get_57780_nvram_info(tp);
13260 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13261 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13262 tg3_get_5717_nvram_info(tp);
13263 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13264 tg3_get_5720_nvram_info(tp);
13266 tg3_get_nvram_info(tp);
13268 if (tp->nvram_size == 0)
13269 tg3_get_nvram_size(tp);
13271 tg3_disable_nvram_access(tp);
13272 tg3_nvram_unlock(tp);
13275 tg3_flag_clear(tp, NVRAM);
13276 tg3_flag_clear(tp, NVRAM_BUFFERED);
13278 tg3_get_eeprom_size(tp);
13282 struct subsys_tbl_ent {
13283 u16 subsys_vendor, subsys_devid;
13287 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13288 /* Broadcom boards. */
13289 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13290 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13291 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13292 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13293 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13294 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13295 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13296 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13297 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13298 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13299 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13300 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13301 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13302 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13303 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13304 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13305 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13306 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13307 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13308 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13309 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13310 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13313 { TG3PCI_SUBVENDOR_ID_3COM,
13314 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13315 { TG3PCI_SUBVENDOR_ID_3COM,
13316 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13317 { TG3PCI_SUBVENDOR_ID_3COM,
13318 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13319 { TG3PCI_SUBVENDOR_ID_3COM,
13320 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13321 { TG3PCI_SUBVENDOR_ID_3COM,
13322 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13325 { TG3PCI_SUBVENDOR_ID_DELL,
13326 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13327 { TG3PCI_SUBVENDOR_ID_DELL,
13328 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13329 { TG3PCI_SUBVENDOR_ID_DELL,
13330 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13331 { TG3PCI_SUBVENDOR_ID_DELL,
13332 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13334 /* Compaq boards. */
13335 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13336 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13337 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13338 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13339 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13340 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13341 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13342 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13343 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13344 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13347 { TG3PCI_SUBVENDOR_ID_IBM,
13348 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13351 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13355 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13356 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13357 tp->pdev->subsystem_vendor) &&
13358 (subsys_id_to_phy_id[i].subsys_devid ==
13359 tp->pdev->subsystem_device))
13360 return &subsys_id_to_phy_id[i];
13365 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13369 tp->phy_id = TG3_PHY_ID_INVALID;
13370 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13372 /* Assume an onboard device and WOL capable by default. */
13373 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13374 tg3_flag_set(tp, WOL_CAP);
13376 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13377 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13378 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13379 tg3_flag_set(tp, IS_NIC);
13381 val = tr32(VCPU_CFGSHDW);
13382 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13383 tg3_flag_set(tp, ASPM_WORKAROUND);
13384 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13385 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13386 tg3_flag_set(tp, WOL_ENABLE);
13387 device_set_wakeup_enable(&tp->pdev->dev, true);
13392 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13393 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13394 u32 nic_cfg, led_cfg;
13395 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13396 int eeprom_phy_serdes = 0;
13398 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13399 tp->nic_sram_data_cfg = nic_cfg;
13401 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13402 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13403 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13404 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13405 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13406 (ver > 0) && (ver < 0x100))
13407 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13410 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13412 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13413 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13414 eeprom_phy_serdes = 1;
13416 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13417 if (nic_phy_id != 0) {
13418 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13419 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13421 eeprom_phy_id = (id1 >> 16) << 10;
13422 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13423 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13427 tp->phy_id = eeprom_phy_id;
13428 if (eeprom_phy_serdes) {
13429 if (!tg3_flag(tp, 5705_PLUS))
13430 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13432 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13435 if (tg3_flag(tp, 5750_PLUS))
13436 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13437 SHASTA_EXT_LED_MODE_MASK);
13439 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13443 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13444 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13447 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13448 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13451 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13452 tp->led_ctrl = LED_CTRL_MODE_MAC;
13454 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13455 * read on some older 5700/5701 bootcode.
13457 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13459 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13461 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13465 case SHASTA_EXT_LED_SHARED:
13466 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13467 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13468 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13469 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13470 LED_CTRL_MODE_PHY_2);
13473 case SHASTA_EXT_LED_MAC:
13474 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13477 case SHASTA_EXT_LED_COMBO:
13478 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13479 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13480 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13481 LED_CTRL_MODE_PHY_2);
13486 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13488 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13489 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13491 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13492 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13494 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13495 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13496 if ((tp->pdev->subsystem_vendor ==
13497 PCI_VENDOR_ID_ARIMA) &&
13498 (tp->pdev->subsystem_device == 0x205a ||
13499 tp->pdev->subsystem_device == 0x2063))
13500 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13502 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13503 tg3_flag_set(tp, IS_NIC);
13506 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13507 tg3_flag_set(tp, ENABLE_ASF);
13508 if (tg3_flag(tp, 5750_PLUS))
13509 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13512 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13513 tg3_flag(tp, 5750_PLUS))
13514 tg3_flag_set(tp, ENABLE_APE);
13516 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13517 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13518 tg3_flag_clear(tp, WOL_CAP);
13520 if (tg3_flag(tp, WOL_CAP) &&
13521 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13522 tg3_flag_set(tp, WOL_ENABLE);
13523 device_set_wakeup_enable(&tp->pdev->dev, true);
13526 if (cfg2 & (1 << 17))
13527 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13529 /* serdes signal pre-emphasis in register 0x590 set by */
13530 /* bootcode if bit 18 is set */
13531 if (cfg2 & (1 << 18))
13532 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13534 if ((tg3_flag(tp, 57765_PLUS) ||
13535 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13536 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13537 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13538 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13540 if (tg3_flag(tp, PCI_EXPRESS) &&
13541 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13542 !tg3_flag(tp, 57765_PLUS)) {
13545 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13546 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13547 tg3_flag_set(tp, ASPM_WORKAROUND);
13550 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13551 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13552 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13553 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13554 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13555 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13558 if (tg3_flag(tp, WOL_CAP))
13559 device_set_wakeup_enable(&tp->pdev->dev,
13560 tg3_flag(tp, WOL_ENABLE));
13562 device_set_wakeup_capable(&tp->pdev->dev, false);
13565 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13570 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13571 tw32(OTP_CTRL, cmd);
13573 /* Wait for up to 1 ms for command to execute. */
13574 for (i = 0; i < 100; i++) {
13575 val = tr32(OTP_STATUS);
13576 if (val & OTP_STATUS_CMD_DONE)
13581 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13584 /* Read the gphy configuration from the OTP region of the chip. The gphy
13585 * configuration is a 32-bit value that straddles the alignment boundary.
13586 * We do two 32-bit reads and then shift and merge the results.
13588 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13590 u32 bhalf_otp, thalf_otp;
13592 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13594 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13597 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13599 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13602 thalf_otp = tr32(OTP_READ_DATA);
13604 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13606 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13609 bhalf_otp = tr32(OTP_READ_DATA);
13611 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13614 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13616 u32 adv = ADVERTISED_Autoneg;
13618 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13619 adv |= ADVERTISED_1000baseT_Half |
13620 ADVERTISED_1000baseT_Full;
13622 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13623 adv |= ADVERTISED_100baseT_Half |
13624 ADVERTISED_100baseT_Full |
13625 ADVERTISED_10baseT_Half |
13626 ADVERTISED_10baseT_Full |
13629 adv |= ADVERTISED_FIBRE;
13631 tp->link_config.advertising = adv;
13632 tp->link_config.speed = SPEED_UNKNOWN;
13633 tp->link_config.duplex = DUPLEX_UNKNOWN;
13634 tp->link_config.autoneg = AUTONEG_ENABLE;
13635 tp->link_config.active_speed = SPEED_UNKNOWN;
13636 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13641 static int __devinit tg3_phy_probe(struct tg3 *tp)
13643 u32 hw_phy_id_1, hw_phy_id_2;
13644 u32 hw_phy_id, hw_phy_id_masked;
13647 /* flow control autonegotiation is default behavior */
13648 tg3_flag_set(tp, PAUSE_AUTONEG);
13649 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13651 if (tg3_flag(tp, USE_PHYLIB))
13652 return tg3_phy_init(tp);
13654 /* Reading the PHY ID register can conflict with ASF
13655 * firmware access to the PHY hardware.
13658 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13659 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13661 /* Now read the physical PHY_ID from the chip and verify
13662 * that it is sane. If it doesn't look good, we fall back
13663 * to either the hard-coded table based PHY_ID and failing
13664 * that the value found in the eeprom area.
13666 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13667 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13669 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13670 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13671 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13673 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13676 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13677 tp->phy_id = hw_phy_id;
13678 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13679 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13681 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13683 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13684 /* Do nothing, phy ID already set up in
13685 * tg3_get_eeprom_hw_cfg().
13688 struct subsys_tbl_ent *p;
13690 /* No eeprom signature? Try the hardcoded
13691 * subsys device table.
13693 p = tg3_lookup_by_subsys(tp);
13697 tp->phy_id = p->phy_id;
13699 tp->phy_id == TG3_PHY_ID_BCM8002)
13700 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13704 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13705 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13707 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13708 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13709 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13710 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13711 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13713 tg3_phy_init_link_config(tp);
13715 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13716 !tg3_flag(tp, ENABLE_APE) &&
13717 !tg3_flag(tp, ENABLE_ASF)) {
13720 tg3_readphy(tp, MII_BMSR, &bmsr);
13721 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13722 (bmsr & BMSR_LSTATUS))
13723 goto skip_phy_reset;
13725 err = tg3_phy_reset(tp);
13729 tg3_phy_set_wirespeed(tp);
13731 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13732 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13733 tp->link_config.flowctrl);
13735 tg3_writephy(tp, MII_BMCR,
13736 BMCR_ANENABLE | BMCR_ANRESTART);
13741 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13742 err = tg3_init_5401phy_dsp(tp);
13746 err = tg3_init_5401phy_dsp(tp);
13752 static void __devinit tg3_read_vpd(struct tg3 *tp)
13755 unsigned int block_end, rosize, len;
13759 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13763 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13765 goto out_not_found;
13767 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13768 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13769 i += PCI_VPD_LRDT_TAG_SIZE;
13771 if (block_end > vpdlen)
13772 goto out_not_found;
13774 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13775 PCI_VPD_RO_KEYWORD_MFR_ID);
13777 len = pci_vpd_info_field_size(&vpd_data[j]);
13779 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13780 if (j + len > block_end || len != 4 ||
13781 memcmp(&vpd_data[j], "1028", 4))
13784 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13785 PCI_VPD_RO_KEYWORD_VENDOR0);
13789 len = pci_vpd_info_field_size(&vpd_data[j]);
13791 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13792 if (j + len > block_end)
13795 memcpy(tp->fw_ver, &vpd_data[j], len);
13796 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13800 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13801 PCI_VPD_RO_KEYWORD_PARTNO);
13803 goto out_not_found;
13805 len = pci_vpd_info_field_size(&vpd_data[i]);
13807 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13808 if (len > TG3_BPN_SIZE ||
13809 (len + i) > vpdlen)
13810 goto out_not_found;
13812 memcpy(tp->board_part_number, &vpd_data[i], len);
13816 if (tp->board_part_number[0])
13820 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13821 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13822 strcpy(tp->board_part_number, "BCM5717");
13823 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13824 strcpy(tp->board_part_number, "BCM5718");
13827 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13828 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13829 strcpy(tp->board_part_number, "BCM57780");
13830 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13831 strcpy(tp->board_part_number, "BCM57760");
13832 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13833 strcpy(tp->board_part_number, "BCM57790");
13834 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13835 strcpy(tp->board_part_number, "BCM57788");
13838 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13839 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13840 strcpy(tp->board_part_number, "BCM57761");
13841 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13842 strcpy(tp->board_part_number, "BCM57765");
13843 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13844 strcpy(tp->board_part_number, "BCM57781");
13845 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13846 strcpy(tp->board_part_number, "BCM57785");
13847 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13848 strcpy(tp->board_part_number, "BCM57791");
13849 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13850 strcpy(tp->board_part_number, "BCM57795");
13853 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13854 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13855 strcpy(tp->board_part_number, "BCM57762");
13856 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13857 strcpy(tp->board_part_number, "BCM57766");
13858 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13859 strcpy(tp->board_part_number, "BCM57782");
13860 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13861 strcpy(tp->board_part_number, "BCM57786");
13864 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13865 strcpy(tp->board_part_number, "BCM95906");
13868 strcpy(tp->board_part_number, "none");
13872 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13876 if (tg3_nvram_read(tp, offset, &val) ||
13877 (val & 0xfc000000) != 0x0c000000 ||
13878 tg3_nvram_read(tp, offset + 4, &val) ||
13885 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13887 u32 val, offset, start, ver_offset;
13889 bool newver = false;
13891 if (tg3_nvram_read(tp, 0xc, &offset) ||
13892 tg3_nvram_read(tp, 0x4, &start))
13895 offset = tg3_nvram_logical_addr(tp, offset);
13897 if (tg3_nvram_read(tp, offset, &val))
13900 if ((val & 0xfc000000) == 0x0c000000) {
13901 if (tg3_nvram_read(tp, offset + 4, &val))
13908 dst_off = strlen(tp->fw_ver);
13911 if (TG3_VER_SIZE - dst_off < 16 ||
13912 tg3_nvram_read(tp, offset + 8, &ver_offset))
13915 offset = offset + ver_offset - start;
13916 for (i = 0; i < 16; i += 4) {
13918 if (tg3_nvram_read_be32(tp, offset + i, &v))
13921 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13926 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13929 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13930 TG3_NVM_BCVER_MAJSFT;
13931 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13932 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13933 "v%d.%02d", major, minor);
13937 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13939 u32 val, major, minor;
13941 /* Use native endian representation */
13942 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13945 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13946 TG3_NVM_HWSB_CFG1_MAJSFT;
13947 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13948 TG3_NVM_HWSB_CFG1_MINSFT;
13950 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13953 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13955 u32 offset, major, minor, build;
13957 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13959 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13962 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13963 case TG3_EEPROM_SB_REVISION_0:
13964 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13966 case TG3_EEPROM_SB_REVISION_2:
13967 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13969 case TG3_EEPROM_SB_REVISION_3:
13970 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13972 case TG3_EEPROM_SB_REVISION_4:
13973 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13975 case TG3_EEPROM_SB_REVISION_5:
13976 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13978 case TG3_EEPROM_SB_REVISION_6:
13979 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13985 if (tg3_nvram_read(tp, offset, &val))
13988 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13989 TG3_EEPROM_SB_EDH_BLD_SHFT;
13990 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13991 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13992 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13994 if (minor > 99 || build > 26)
13997 offset = strlen(tp->fw_ver);
13998 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13999 " v%d.%02d", major, minor);
14002 offset = strlen(tp->fw_ver);
14003 if (offset < TG3_VER_SIZE - 1)
14004 tp->fw_ver[offset] = 'a' + build - 1;
14008 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
14010 u32 val, offset, start;
14013 for (offset = TG3_NVM_DIR_START;
14014 offset < TG3_NVM_DIR_END;
14015 offset += TG3_NVM_DIRENT_SIZE) {
14016 if (tg3_nvram_read(tp, offset, &val))
14019 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14023 if (offset == TG3_NVM_DIR_END)
14026 if (!tg3_flag(tp, 5705_PLUS))
14027 start = 0x08000000;
14028 else if (tg3_nvram_read(tp, offset - 4, &start))
14031 if (tg3_nvram_read(tp, offset + 4, &offset) ||
14032 !tg3_fw_img_is_valid(tp, offset) ||
14033 tg3_nvram_read(tp, offset + 8, &val))
14036 offset += val - start;
14038 vlen = strlen(tp->fw_ver);
14040 tp->fw_ver[vlen++] = ',';
14041 tp->fw_ver[vlen++] = ' ';
14043 for (i = 0; i < 4; i++) {
14045 if (tg3_nvram_read_be32(tp, offset, &v))
14048 offset += sizeof(v);
14050 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14051 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14055 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14060 static void __devinit tg3_probe_ncsi(struct tg3 *tp)
14064 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14065 if (apedata != APE_SEG_SIG_MAGIC)
14068 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14069 if (!(apedata & APE_FW_STATUS_READY))
14072 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14073 tg3_flag_set(tp, APE_HAS_NCSI);
14076 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
14082 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14084 if (tg3_flag(tp, APE_HAS_NCSI))
14089 vlen = strlen(tp->fw_ver);
14091 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14093 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14094 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14095 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14096 (apedata & APE_FW_VERSION_BLDMSK));
14099 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
14102 bool vpd_vers = false;
14104 if (tp->fw_ver[0] != 0)
14107 if (tg3_flag(tp, NO_NVRAM)) {
14108 strcat(tp->fw_ver, "sb");
14112 if (tg3_nvram_read(tp, 0, &val))
14115 if (val == TG3_EEPROM_MAGIC)
14116 tg3_read_bc_ver(tp);
14117 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14118 tg3_read_sb_ver(tp, val);
14119 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14120 tg3_read_hwsb_ver(tp);
14122 if (tg3_flag(tp, ENABLE_ASF)) {
14123 if (tg3_flag(tp, ENABLE_APE)) {
14124 tg3_probe_ncsi(tp);
14126 tg3_read_dash_ver(tp);
14127 } else if (!vpd_vers) {
14128 tg3_read_mgmtfw_ver(tp);
14132 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14135 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14137 if (tg3_flag(tp, LRG_PROD_RING_CAP))
14138 return TG3_RX_RET_MAX_SIZE_5717;
14139 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14140 return TG3_RX_RET_MAX_SIZE_5700;
14142 return TG3_RX_RET_MAX_SIZE_5705;
14145 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14146 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14147 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14148 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14152 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
14154 struct pci_dev *peer;
14155 unsigned int func, devnr = tp->pdev->devfn & ~7;
14157 for (func = 0; func < 8; func++) {
14158 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14159 if (peer && peer != tp->pdev)
14163 /* 5704 can be configured in single-port mode, set peer to
14164 * tp->pdev in that case.
14172 * We don't need to keep the refcount elevated; there's no way
14173 * to remove one half of this device without removing the other
14180 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14182 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
14183 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14186 /* All devices that use the alternate
14187 * ASIC REV location have a CPMU.
14189 tg3_flag_set(tp, CPMU_PRESENT);
14191 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14192 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14193 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14194 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14195 reg = TG3PCI_GEN2_PRODID_ASICREV;
14196 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14197 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14198 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14199 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14200 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14201 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14202 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14203 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14204 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14205 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14206 reg = TG3PCI_GEN15_PRODID_ASICREV;
14208 reg = TG3PCI_PRODID_ASICREV;
14210 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14213 /* Wrong chip ID in 5752 A0. This code can be removed later
14214 * as A0 is not in production.
14216 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14217 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14219 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14220 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14221 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14222 tg3_flag_set(tp, 5717_PLUS);
14224 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14226 tg3_flag_set(tp, 57765_CLASS);
14228 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14229 tg3_flag_set(tp, 57765_PLUS);
14231 /* Intentionally exclude ASIC_REV_5906 */
14232 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14234 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14235 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14236 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14237 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14238 tg3_flag(tp, 57765_PLUS))
14239 tg3_flag_set(tp, 5755_PLUS);
14241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14242 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14243 tg3_flag_set(tp, 5780_CLASS);
14245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14247 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14248 tg3_flag(tp, 5755_PLUS) ||
14249 tg3_flag(tp, 5780_CLASS))
14250 tg3_flag_set(tp, 5750_PLUS);
14252 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14253 tg3_flag(tp, 5750_PLUS))
14254 tg3_flag_set(tp, 5705_PLUS);
14257 static int __devinit tg3_get_invariants(struct tg3 *tp)
14260 u32 pci_state_reg, grc_misc_cfg;
14265 /* Force memory write invalidate off. If we leave it on,
14266 * then on 5700_BX chips we have to enable a workaround.
14267 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14268 * to match the cacheline size. The Broadcom driver have this
14269 * workaround but turns MWI off all the times so never uses
14270 * it. This seems to suggest that the workaround is insufficient.
14272 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14273 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14274 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14276 /* Important! -- Make sure register accesses are byteswapped
14277 * correctly. Also, for those chips that require it, make
14278 * sure that indirect register accesses are enabled before
14279 * the first operation.
14281 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14283 tp->misc_host_ctrl |= (misc_ctrl_reg &
14284 MISC_HOST_CTRL_CHIPREV);
14285 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14286 tp->misc_host_ctrl);
14288 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14290 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14291 * we need to disable memory and use config. cycles
14292 * only to access all registers. The 5702/03 chips
14293 * can mistakenly decode the special cycles from the
14294 * ICH chipsets as memory write cycles, causing corruption
14295 * of register and memory space. Only certain ICH bridges
14296 * will drive special cycles with non-zero data during the
14297 * address phase which can fall within the 5703's address
14298 * range. This is not an ICH bug as the PCI spec allows
14299 * non-zero address during special cycles. However, only
14300 * these ICH bridges are known to drive non-zero addresses
14301 * during special cycles.
14303 * Since special cycles do not cross PCI bridges, we only
14304 * enable this workaround if the 5703 is on the secondary
14305 * bus of these ICH bridges.
14307 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14308 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14309 static struct tg3_dev_id {
14313 } ich_chipsets[] = {
14314 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14316 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14318 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14320 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14324 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14325 struct pci_dev *bridge = NULL;
14327 while (pci_id->vendor != 0) {
14328 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14334 if (pci_id->rev != PCI_ANY_ID) {
14335 if (bridge->revision > pci_id->rev)
14338 if (bridge->subordinate &&
14339 (bridge->subordinate->number ==
14340 tp->pdev->bus->number)) {
14341 tg3_flag_set(tp, ICH_WORKAROUND);
14342 pci_dev_put(bridge);
14348 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14349 static struct tg3_dev_id {
14352 } bridge_chipsets[] = {
14353 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14354 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14357 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14358 struct pci_dev *bridge = NULL;
14360 while (pci_id->vendor != 0) {
14361 bridge = pci_get_device(pci_id->vendor,
14368 if (bridge->subordinate &&
14369 (bridge->subordinate->number <=
14370 tp->pdev->bus->number) &&
14371 (bridge->subordinate->busn_res.end >=
14372 tp->pdev->bus->number)) {
14373 tg3_flag_set(tp, 5701_DMA_BUG);
14374 pci_dev_put(bridge);
14380 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14381 * DMA addresses > 40-bit. This bridge may have other additional
14382 * 57xx devices behind it in some 4-port NIC designs for example.
14383 * Any tg3 device found behind the bridge will also need the 40-bit
14386 if (tg3_flag(tp, 5780_CLASS)) {
14387 tg3_flag_set(tp, 40BIT_DMA_BUG);
14388 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14390 struct pci_dev *bridge = NULL;
14393 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14394 PCI_DEVICE_ID_SERVERWORKS_EPB,
14396 if (bridge && bridge->subordinate &&
14397 (bridge->subordinate->number <=
14398 tp->pdev->bus->number) &&
14399 (bridge->subordinate->busn_res.end >=
14400 tp->pdev->bus->number)) {
14401 tg3_flag_set(tp, 40BIT_DMA_BUG);
14402 pci_dev_put(bridge);
14408 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14409 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14410 tp->pdev_peer = tg3_find_peer(tp);
14412 /* Determine TSO capabilities */
14413 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14414 ; /* Do nothing. HW bug. */
14415 else if (tg3_flag(tp, 57765_PLUS))
14416 tg3_flag_set(tp, HW_TSO_3);
14417 else if (tg3_flag(tp, 5755_PLUS) ||
14418 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14419 tg3_flag_set(tp, HW_TSO_2);
14420 else if (tg3_flag(tp, 5750_PLUS)) {
14421 tg3_flag_set(tp, HW_TSO_1);
14422 tg3_flag_set(tp, TSO_BUG);
14423 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14424 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14425 tg3_flag_clear(tp, TSO_BUG);
14426 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14427 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14428 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14429 tg3_flag_set(tp, TSO_BUG);
14430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14431 tp->fw_needed = FIRMWARE_TG3TSO5;
14433 tp->fw_needed = FIRMWARE_TG3TSO;
14436 /* Selectively allow TSO based on operating conditions */
14437 if (tg3_flag(tp, HW_TSO_1) ||
14438 tg3_flag(tp, HW_TSO_2) ||
14439 tg3_flag(tp, HW_TSO_3) ||
14441 /* For firmware TSO, assume ASF is disabled.
14442 * We'll disable TSO later if we discover ASF
14443 * is enabled in tg3_get_eeprom_hw_cfg().
14445 tg3_flag_set(tp, TSO_CAPABLE);
14447 tg3_flag_clear(tp, TSO_CAPABLE);
14448 tg3_flag_clear(tp, TSO_BUG);
14449 tp->fw_needed = NULL;
14452 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14453 tp->fw_needed = FIRMWARE_TG3;
14457 if (tg3_flag(tp, 5750_PLUS)) {
14458 tg3_flag_set(tp, SUPPORT_MSI);
14459 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14460 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14461 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14462 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14463 tp->pdev_peer == tp->pdev))
14464 tg3_flag_clear(tp, SUPPORT_MSI);
14466 if (tg3_flag(tp, 5755_PLUS) ||
14467 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14468 tg3_flag_set(tp, 1SHOT_MSI);
14471 if (tg3_flag(tp, 57765_PLUS)) {
14472 tg3_flag_set(tp, SUPPORT_MSIX);
14473 tp->irq_max = TG3_IRQ_MAX_VECS;
14474 tg3_rss_init_dflt_indir_tbl(tp);
14478 if (tg3_flag(tp, 5755_PLUS) ||
14479 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14480 tg3_flag_set(tp, SHORT_DMA_BUG);
14482 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14483 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14485 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14488 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14490 if (tg3_flag(tp, 57765_PLUS) &&
14491 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14492 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14494 if (!tg3_flag(tp, 5705_PLUS) ||
14495 tg3_flag(tp, 5780_CLASS) ||
14496 tg3_flag(tp, USE_JUMBO_BDFLAG))
14497 tg3_flag_set(tp, JUMBO_CAPABLE);
14499 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14502 if (pci_is_pcie(tp->pdev)) {
14505 tg3_flag_set(tp, PCI_EXPRESS);
14507 pci_read_config_word(tp->pdev,
14508 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14510 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14511 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14513 tg3_flag_clear(tp, HW_TSO_2);
14514 tg3_flag_clear(tp, TSO_CAPABLE);
14516 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14517 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14518 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14519 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14520 tg3_flag_set(tp, CLKREQ_BUG);
14521 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14522 tg3_flag_set(tp, L1PLLPD_EN);
14524 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14525 /* BCM5785 devices are effectively PCIe devices, and should
14526 * follow PCIe codepaths, but do not have a PCIe capabilities
14529 tg3_flag_set(tp, PCI_EXPRESS);
14530 } else if (!tg3_flag(tp, 5705_PLUS) ||
14531 tg3_flag(tp, 5780_CLASS)) {
14532 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14533 if (!tp->pcix_cap) {
14534 dev_err(&tp->pdev->dev,
14535 "Cannot find PCI-X capability, aborting\n");
14539 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14540 tg3_flag_set(tp, PCIX_MODE);
14543 /* If we have an AMD 762 or VIA K8T800 chipset, write
14544 * reordering to the mailbox registers done by the host
14545 * controller can cause major troubles. We read back from
14546 * every mailbox register write to force the writes to be
14547 * posted to the chip in order.
14549 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14550 !tg3_flag(tp, PCI_EXPRESS))
14551 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14553 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14554 &tp->pci_cacheline_sz);
14555 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14556 &tp->pci_lat_timer);
14557 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14558 tp->pci_lat_timer < 64) {
14559 tp->pci_lat_timer = 64;
14560 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14561 tp->pci_lat_timer);
14564 /* Important! -- It is critical that the PCI-X hw workaround
14565 * situation is decided before the first MMIO register access.
14567 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14568 /* 5700 BX chips need to have their TX producer index
14569 * mailboxes written twice to workaround a bug.
14571 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14573 /* If we are in PCI-X mode, enable register write workaround.
14575 * The workaround is to use indirect register accesses
14576 * for all chip writes not to mailbox registers.
14578 if (tg3_flag(tp, PCIX_MODE)) {
14581 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14583 /* The chip can have it's power management PCI config
14584 * space registers clobbered due to this bug.
14585 * So explicitly force the chip into D0 here.
14587 pci_read_config_dword(tp->pdev,
14588 tp->pm_cap + PCI_PM_CTRL,
14590 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14591 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14592 pci_write_config_dword(tp->pdev,
14593 tp->pm_cap + PCI_PM_CTRL,
14596 /* Also, force SERR#/PERR# in PCI command. */
14597 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14598 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14599 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14603 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14604 tg3_flag_set(tp, PCI_HIGH_SPEED);
14605 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14606 tg3_flag_set(tp, PCI_32BIT);
14608 /* Chip-specific fixup from Broadcom driver */
14609 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14610 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14611 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14612 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14615 /* Default fast path register access methods */
14616 tp->read32 = tg3_read32;
14617 tp->write32 = tg3_write32;
14618 tp->read32_mbox = tg3_read32;
14619 tp->write32_mbox = tg3_write32;
14620 tp->write32_tx_mbox = tg3_write32;
14621 tp->write32_rx_mbox = tg3_write32;
14623 /* Various workaround register access methods */
14624 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14625 tp->write32 = tg3_write_indirect_reg32;
14626 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14627 (tg3_flag(tp, PCI_EXPRESS) &&
14628 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14630 * Back to back register writes can cause problems on these
14631 * chips, the workaround is to read back all reg writes
14632 * except those to mailbox regs.
14634 * See tg3_write_indirect_reg32().
14636 tp->write32 = tg3_write_flush_reg32;
14639 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14640 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14641 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14642 tp->write32_rx_mbox = tg3_write_flush_reg32;
14645 if (tg3_flag(tp, ICH_WORKAROUND)) {
14646 tp->read32 = tg3_read_indirect_reg32;
14647 tp->write32 = tg3_write_indirect_reg32;
14648 tp->read32_mbox = tg3_read_indirect_mbox;
14649 tp->write32_mbox = tg3_write_indirect_mbox;
14650 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14651 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14656 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14657 pci_cmd &= ~PCI_COMMAND_MEMORY;
14658 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14660 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14661 tp->read32_mbox = tg3_read32_mbox_5906;
14662 tp->write32_mbox = tg3_write32_mbox_5906;
14663 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14664 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14667 if (tp->write32 == tg3_write_indirect_reg32 ||
14668 (tg3_flag(tp, PCIX_MODE) &&
14669 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14670 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14671 tg3_flag_set(tp, SRAM_USE_CONFIG);
14673 /* The memory arbiter has to be enabled in order for SRAM accesses
14674 * to succeed. Normally on powerup the tg3 chip firmware will make
14675 * sure it is enabled, but other entities such as system netboot
14676 * code might disable it.
14678 val = tr32(MEMARB_MODE);
14679 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14681 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14682 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14683 tg3_flag(tp, 5780_CLASS)) {
14684 if (tg3_flag(tp, PCIX_MODE)) {
14685 pci_read_config_dword(tp->pdev,
14686 tp->pcix_cap + PCI_X_STATUS,
14688 tp->pci_fn = val & 0x7;
14690 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14691 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14692 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14693 NIC_SRAM_CPMUSTAT_SIG) {
14694 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14695 tp->pci_fn = tp->pci_fn ? 1 : 0;
14697 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14698 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14699 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14700 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14701 NIC_SRAM_CPMUSTAT_SIG) {
14702 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14703 TG3_CPMU_STATUS_FSHFT_5719;
14707 /* Get eeprom hw config before calling tg3_set_power_state().
14708 * In particular, the TG3_FLAG_IS_NIC flag must be
14709 * determined before calling tg3_set_power_state() so that
14710 * we know whether or not to switch out of Vaux power.
14711 * When the flag is set, it means that GPIO1 is used for eeprom
14712 * write protect and also implies that it is a LOM where GPIOs
14713 * are not used to switch power.
14715 tg3_get_eeprom_hw_cfg(tp);
14717 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14718 tg3_flag_clear(tp, TSO_CAPABLE);
14719 tg3_flag_clear(tp, TSO_BUG);
14720 tp->fw_needed = NULL;
14723 if (tg3_flag(tp, ENABLE_APE)) {
14724 /* Allow reads and writes to the
14725 * APE register and memory space.
14727 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14728 PCISTATE_ALLOW_APE_SHMEM_WR |
14729 PCISTATE_ALLOW_APE_PSPACE_WR;
14730 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14733 tg3_ape_lock_init(tp);
14736 /* Set up tp->grc_local_ctrl before calling
14737 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14738 * will bring 5700's external PHY out of reset.
14739 * It is also used as eeprom write protect on LOMs.
14741 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14742 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14743 tg3_flag(tp, EEPROM_WRITE_PROT))
14744 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14745 GRC_LCLCTRL_GPIO_OUTPUT1);
14746 /* Unused GPIO3 must be driven as output on 5752 because there
14747 * are no pull-up resistors on unused GPIO pins.
14749 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14750 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14752 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14753 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14754 tg3_flag(tp, 57765_CLASS))
14755 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14757 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14758 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14759 /* Turn off the debug UART. */
14760 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14761 if (tg3_flag(tp, IS_NIC))
14762 /* Keep VMain power. */
14763 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14764 GRC_LCLCTRL_GPIO_OUTPUT0;
14767 /* Switch out of Vaux if it is a NIC */
14768 tg3_pwrsrc_switch_to_vmain(tp);
14770 /* Derive initial jumbo mode from MTU assigned in
14771 * ether_setup() via the alloc_etherdev() call
14773 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14774 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14776 /* Determine WakeOnLan speed to use. */
14777 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14778 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14779 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14780 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14781 tg3_flag_clear(tp, WOL_SPEED_100MB);
14783 tg3_flag_set(tp, WOL_SPEED_100MB);
14786 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14787 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14789 /* A few boards don't want Ethernet@WireSpeed phy feature */
14790 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14791 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14792 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14793 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14794 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14795 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14796 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14798 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14799 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14800 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14801 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14802 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14804 if (tg3_flag(tp, 5705_PLUS) &&
14805 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14806 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14807 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14808 !tg3_flag(tp, 57765_PLUS)) {
14809 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14810 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14811 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14812 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14813 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14814 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14815 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14816 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14817 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14819 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14822 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14823 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14824 tp->phy_otp = tg3_read_otp_phycfg(tp);
14825 if (tp->phy_otp == 0)
14826 tp->phy_otp = TG3_OTP_DEFAULT;
14829 if (tg3_flag(tp, CPMU_PRESENT))
14830 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14832 tp->mi_mode = MAC_MI_MODE_BASE;
14834 tp->coalesce_mode = 0;
14835 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14836 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14837 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14839 /* Set these bits to enable statistics workaround. */
14840 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14841 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14842 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14843 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14844 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14847 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14848 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14849 tg3_flag_set(tp, USE_PHYLIB);
14851 err = tg3_mdio_init(tp);
14855 /* Initialize data/descriptor byte/word swapping. */
14856 val = tr32(GRC_MODE);
14857 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14858 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14859 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14860 GRC_MODE_B2HRX_ENABLE |
14861 GRC_MODE_HTX2B_ENABLE |
14862 GRC_MODE_HOST_STACKUP);
14864 val &= GRC_MODE_HOST_STACKUP;
14866 tw32(GRC_MODE, val | tp->grc_mode);
14868 tg3_switch_clocks(tp);
14870 /* Clear this out for sanity. */
14871 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14873 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14875 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14876 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14877 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14879 if (chiprevid == CHIPREV_ID_5701_A0 ||
14880 chiprevid == CHIPREV_ID_5701_B0 ||
14881 chiprevid == CHIPREV_ID_5701_B2 ||
14882 chiprevid == CHIPREV_ID_5701_B5) {
14883 void __iomem *sram_base;
14885 /* Write some dummy words into the SRAM status block
14886 * area, see if it reads back correctly. If the return
14887 * value is bad, force enable the PCIX workaround.
14889 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14891 writel(0x00000000, sram_base);
14892 writel(0x00000000, sram_base + 4);
14893 writel(0xffffffff, sram_base + 4);
14894 if (readl(sram_base) != 0x00000000)
14895 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14900 tg3_nvram_init(tp);
14902 grc_misc_cfg = tr32(GRC_MISC_CFG);
14903 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14906 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14907 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14908 tg3_flag_set(tp, IS_5788);
14910 if (!tg3_flag(tp, IS_5788) &&
14911 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14912 tg3_flag_set(tp, TAGGED_STATUS);
14913 if (tg3_flag(tp, TAGGED_STATUS)) {
14914 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14915 HOSTCC_MODE_CLRTICK_TXBD);
14917 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14918 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14919 tp->misc_host_ctrl);
14922 /* Preserve the APE MAC_MODE bits */
14923 if (tg3_flag(tp, ENABLE_APE))
14924 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14928 /* these are limited to 10/100 only */
14929 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14930 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14931 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14932 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14933 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14934 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14935 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14936 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14937 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14938 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14939 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14940 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14941 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14942 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14943 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14944 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14946 err = tg3_phy_probe(tp);
14948 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14949 /* ... but do not return immediately ... */
14954 tg3_read_fw_ver(tp);
14956 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14957 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14959 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14960 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14962 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14965 /* 5700 {AX,BX} chips have a broken status block link
14966 * change bit implementation, so we must use the
14967 * status register in those cases.
14969 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14970 tg3_flag_set(tp, USE_LINKCHG_REG);
14972 tg3_flag_clear(tp, USE_LINKCHG_REG);
14974 /* The led_ctrl is set during tg3_phy_probe, here we might
14975 * have to force the link status polling mechanism based
14976 * upon subsystem IDs.
14978 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14980 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14981 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14982 tg3_flag_set(tp, USE_LINKCHG_REG);
14985 /* For all SERDES we poll the MAC status register. */
14986 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14987 tg3_flag_set(tp, POLL_SERDES);
14989 tg3_flag_clear(tp, POLL_SERDES);
14991 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14992 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14993 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14994 tg3_flag(tp, PCIX_MODE)) {
14995 tp->rx_offset = NET_SKB_PAD;
14996 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14997 tp->rx_copy_thresh = ~(u16)0;
15001 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15002 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15003 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15005 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15007 /* Increment the rx prod index on the rx std ring by at most
15008 * 8 for these chips to workaround hw errata.
15010 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15011 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15012 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15013 tp->rx_std_max_post = 8;
15015 if (tg3_flag(tp, ASPM_WORKAROUND))
15016 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15017 PCIE_PWR_MGMT_L1_THRESH_MSK;
15022 #ifdef CONFIG_SPARC
15023 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
15025 struct net_device *dev = tp->dev;
15026 struct pci_dev *pdev = tp->pdev;
15027 struct device_node *dp = pci_device_to_OF_node(pdev);
15028 const unsigned char *addr;
15031 addr = of_get_property(dp, "local-mac-address", &len);
15032 if (addr && len == 6) {
15033 memcpy(dev->dev_addr, addr, 6);
15034 memcpy(dev->perm_addr, dev->dev_addr, 6);
15040 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
15042 struct net_device *dev = tp->dev;
15044 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15045 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15050 static int __devinit tg3_get_device_address(struct tg3 *tp)
15052 struct net_device *dev = tp->dev;
15053 u32 hi, lo, mac_offset;
15056 #ifdef CONFIG_SPARC
15057 if (!tg3_get_macaddr_sparc(tp))
15062 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15063 tg3_flag(tp, 5780_CLASS)) {
15064 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15066 if (tg3_nvram_lock(tp))
15067 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15069 tg3_nvram_unlock(tp);
15070 } else if (tg3_flag(tp, 5717_PLUS)) {
15071 if (tp->pci_fn & 1)
15073 if (tp->pci_fn > 1)
15074 mac_offset += 0x18c;
15075 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15078 /* First try to get it from MAC address mailbox. */
15079 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
15080 if ((hi >> 16) == 0x484b) {
15081 dev->dev_addr[0] = (hi >> 8) & 0xff;
15082 dev->dev_addr[1] = (hi >> 0) & 0xff;
15084 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15085 dev->dev_addr[2] = (lo >> 24) & 0xff;
15086 dev->dev_addr[3] = (lo >> 16) & 0xff;
15087 dev->dev_addr[4] = (lo >> 8) & 0xff;
15088 dev->dev_addr[5] = (lo >> 0) & 0xff;
15090 /* Some old bootcode may report a 0 MAC address in SRAM */
15091 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15094 /* Next, try NVRAM. */
15095 if (!tg3_flag(tp, NO_NVRAM) &&
15096 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15097 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15098 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15099 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15101 /* Finally just fetch it out of the MAC control regs. */
15103 hi = tr32(MAC_ADDR_0_HIGH);
15104 lo = tr32(MAC_ADDR_0_LOW);
15106 dev->dev_addr[5] = lo & 0xff;
15107 dev->dev_addr[4] = (lo >> 8) & 0xff;
15108 dev->dev_addr[3] = (lo >> 16) & 0xff;
15109 dev->dev_addr[2] = (lo >> 24) & 0xff;
15110 dev->dev_addr[1] = hi & 0xff;
15111 dev->dev_addr[0] = (hi >> 8) & 0xff;
15115 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15116 #ifdef CONFIG_SPARC
15117 if (!tg3_get_default_macaddr_sparc(tp))
15122 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15126 #define BOUNDARY_SINGLE_CACHELINE 1
15127 #define BOUNDARY_MULTI_CACHELINE 2
15129 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15131 int cacheline_size;
15135 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15137 cacheline_size = 1024;
15139 cacheline_size = (int) byte * 4;
15141 /* On 5703 and later chips, the boundary bits have no
15144 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15145 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15146 !tg3_flag(tp, PCI_EXPRESS))
15149 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15150 goal = BOUNDARY_MULTI_CACHELINE;
15152 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15153 goal = BOUNDARY_SINGLE_CACHELINE;
15159 if (tg3_flag(tp, 57765_PLUS)) {
15160 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15167 /* PCI controllers on most RISC systems tend to disconnect
15168 * when a device tries to burst across a cache-line boundary.
15169 * Therefore, letting tg3 do so just wastes PCI bandwidth.
15171 * Unfortunately, for PCI-E there are only limited
15172 * write-side controls for this, and thus for reads
15173 * we will still get the disconnects. We'll also waste
15174 * these PCI cycles for both read and write for chips
15175 * other than 5700 and 5701 which do not implement the
15178 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15179 switch (cacheline_size) {
15184 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15185 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15186 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15188 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15189 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15194 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15195 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15199 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15200 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15203 } else if (tg3_flag(tp, PCI_EXPRESS)) {
15204 switch (cacheline_size) {
15208 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15209 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15210 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15216 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15217 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15221 switch (cacheline_size) {
15223 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15224 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15225 DMA_RWCTRL_WRITE_BNDRY_16);
15230 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15231 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15232 DMA_RWCTRL_WRITE_BNDRY_32);
15237 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15238 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15239 DMA_RWCTRL_WRITE_BNDRY_64);
15244 if (goal == BOUNDARY_SINGLE_CACHELINE) {
15245 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15246 DMA_RWCTRL_WRITE_BNDRY_128);
15251 val |= (DMA_RWCTRL_READ_BNDRY_256 |
15252 DMA_RWCTRL_WRITE_BNDRY_256);
15255 val |= (DMA_RWCTRL_READ_BNDRY_512 |
15256 DMA_RWCTRL_WRITE_BNDRY_512);
15260 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15261 DMA_RWCTRL_WRITE_BNDRY_1024);
15270 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15272 struct tg3_internal_buffer_desc test_desc;
15273 u32 sram_dma_descs;
15276 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15278 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15279 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15280 tw32(RDMAC_STATUS, 0);
15281 tw32(WDMAC_STATUS, 0);
15283 tw32(BUFMGR_MODE, 0);
15284 tw32(FTQ_RESET, 0);
15286 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15287 test_desc.addr_lo = buf_dma & 0xffffffff;
15288 test_desc.nic_mbuf = 0x00002100;
15289 test_desc.len = size;
15292 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
15293 * the *second* time the tg3 driver was getting loaded after an
15296 * Broadcom tells me:
15297 * ...the DMA engine is connected to the GRC block and a DMA
15298 * reset may affect the GRC block in some unpredictable way...
15299 * The behavior of resets to individual blocks has not been tested.
15301 * Broadcom noted the GRC reset will also reset all sub-components.
15304 test_desc.cqid_sqid = (13 << 8) | 2;
15306 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15309 test_desc.cqid_sqid = (16 << 8) | 7;
15311 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15314 test_desc.flags = 0x00000005;
15316 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15319 val = *(((u32 *)&test_desc) + i);
15320 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15321 sram_dma_descs + (i * sizeof(u32)));
15322 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15324 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15327 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15329 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15332 for (i = 0; i < 40; i++) {
15336 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15338 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15339 if ((val & 0xffff) == sram_dma_descs) {
15350 #define TEST_BUFFER_SIZE 0x2000
15352 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15353 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15357 static int __devinit tg3_test_dma(struct tg3 *tp)
15359 dma_addr_t buf_dma;
15360 u32 *buf, saved_dma_rwctrl;
15363 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15364 &buf_dma, GFP_KERNEL);
15370 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15371 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15373 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15375 if (tg3_flag(tp, 57765_PLUS))
15378 if (tg3_flag(tp, PCI_EXPRESS)) {
15379 /* DMA read watermark not used on PCIE */
15380 tp->dma_rwctrl |= 0x00180000;
15381 } else if (!tg3_flag(tp, PCIX_MODE)) {
15382 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15383 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15384 tp->dma_rwctrl |= 0x003f0000;
15386 tp->dma_rwctrl |= 0x003f000f;
15388 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15389 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15390 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15391 u32 read_water = 0x7;
15393 /* If the 5704 is behind the EPB bridge, we can
15394 * do the less restrictive ONE_DMA workaround for
15395 * better performance.
15397 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15398 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15399 tp->dma_rwctrl |= 0x8000;
15400 else if (ccval == 0x6 || ccval == 0x7)
15401 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15403 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15405 /* Set bit 23 to enable PCIX hw bug fix */
15407 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15408 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15410 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15411 /* 5780 always in PCIX mode */
15412 tp->dma_rwctrl |= 0x00144000;
15413 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15414 /* 5714 always in PCIX mode */
15415 tp->dma_rwctrl |= 0x00148000;
15417 tp->dma_rwctrl |= 0x001b000f;
15421 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15422 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15423 tp->dma_rwctrl &= 0xfffffff0;
15425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15426 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15427 /* Remove this if it causes problems for some boards. */
15428 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15430 /* On 5700/5701 chips, we need to set this bit.
15431 * Otherwise the chip will issue cacheline transactions
15432 * to streamable DMA memory with not all the byte
15433 * enables turned on. This is an error on several
15434 * RISC PCI controllers, in particular sparc64.
15436 * On 5703/5704 chips, this bit has been reassigned
15437 * a different meaning. In particular, it is used
15438 * on those chips to enable a PCI-X workaround.
15440 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15443 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15446 /* Unneeded, already done by tg3_get_invariants. */
15447 tg3_switch_clocks(tp);
15450 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15451 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15454 /* It is best to perform DMA test with maximum write burst size
15455 * to expose the 5700/5701 write DMA bug.
15457 saved_dma_rwctrl = tp->dma_rwctrl;
15458 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15459 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15464 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15467 /* Send the buffer to the chip. */
15468 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15470 dev_err(&tp->pdev->dev,
15471 "%s: Buffer write failed. err = %d\n",
15477 /* validate data reached card RAM correctly. */
15478 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15480 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15481 if (le32_to_cpu(val) != p[i]) {
15482 dev_err(&tp->pdev->dev,
15483 "%s: Buffer corrupted on device! "
15484 "(%d != %d)\n", __func__, val, i);
15485 /* ret = -ENODEV here? */
15490 /* Now read it back. */
15491 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15493 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15494 "err = %d\n", __func__, ret);
15499 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15503 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15504 DMA_RWCTRL_WRITE_BNDRY_16) {
15505 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15506 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15507 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15510 dev_err(&tp->pdev->dev,
15511 "%s: Buffer corrupted on read back! "
15512 "(%d != %d)\n", __func__, p[i], i);
15518 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15524 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15525 DMA_RWCTRL_WRITE_BNDRY_16) {
15526 /* DMA test passed without adjusting DMA boundary,
15527 * now look for chipsets that are known to expose the
15528 * DMA bug without failing the test.
15530 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15531 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15532 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15534 /* Safe to use the calculated DMA boundary. */
15535 tp->dma_rwctrl = saved_dma_rwctrl;
15538 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15542 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15547 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15549 if (tg3_flag(tp, 57765_PLUS)) {
15550 tp->bufmgr_config.mbuf_read_dma_low_water =
15551 DEFAULT_MB_RDMA_LOW_WATER_5705;
15552 tp->bufmgr_config.mbuf_mac_rx_low_water =
15553 DEFAULT_MB_MACRX_LOW_WATER_57765;
15554 tp->bufmgr_config.mbuf_high_water =
15555 DEFAULT_MB_HIGH_WATER_57765;
15557 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15558 DEFAULT_MB_RDMA_LOW_WATER_5705;
15559 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15560 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15561 tp->bufmgr_config.mbuf_high_water_jumbo =
15562 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15563 } else if (tg3_flag(tp, 5705_PLUS)) {
15564 tp->bufmgr_config.mbuf_read_dma_low_water =
15565 DEFAULT_MB_RDMA_LOW_WATER_5705;
15566 tp->bufmgr_config.mbuf_mac_rx_low_water =
15567 DEFAULT_MB_MACRX_LOW_WATER_5705;
15568 tp->bufmgr_config.mbuf_high_water =
15569 DEFAULT_MB_HIGH_WATER_5705;
15570 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15571 tp->bufmgr_config.mbuf_mac_rx_low_water =
15572 DEFAULT_MB_MACRX_LOW_WATER_5906;
15573 tp->bufmgr_config.mbuf_high_water =
15574 DEFAULT_MB_HIGH_WATER_5906;
15577 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15578 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15579 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15580 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15581 tp->bufmgr_config.mbuf_high_water_jumbo =
15582 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15584 tp->bufmgr_config.mbuf_read_dma_low_water =
15585 DEFAULT_MB_RDMA_LOW_WATER;
15586 tp->bufmgr_config.mbuf_mac_rx_low_water =
15587 DEFAULT_MB_MACRX_LOW_WATER;
15588 tp->bufmgr_config.mbuf_high_water =
15589 DEFAULT_MB_HIGH_WATER;
15591 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15592 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15593 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15594 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15595 tp->bufmgr_config.mbuf_high_water_jumbo =
15596 DEFAULT_MB_HIGH_WATER_JUMBO;
15599 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15600 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15603 static char * __devinit tg3_phy_string(struct tg3 *tp)
15605 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15606 case TG3_PHY_ID_BCM5400: return "5400";
15607 case TG3_PHY_ID_BCM5401: return "5401";
15608 case TG3_PHY_ID_BCM5411: return "5411";
15609 case TG3_PHY_ID_BCM5701: return "5701";
15610 case TG3_PHY_ID_BCM5703: return "5703";
15611 case TG3_PHY_ID_BCM5704: return "5704";
15612 case TG3_PHY_ID_BCM5705: return "5705";
15613 case TG3_PHY_ID_BCM5750: return "5750";
15614 case TG3_PHY_ID_BCM5752: return "5752";
15615 case TG3_PHY_ID_BCM5714: return "5714";
15616 case TG3_PHY_ID_BCM5780: return "5780";
15617 case TG3_PHY_ID_BCM5755: return "5755";
15618 case TG3_PHY_ID_BCM5787: return "5787";
15619 case TG3_PHY_ID_BCM5784: return "5784";
15620 case TG3_PHY_ID_BCM5756: return "5722/5756";
15621 case TG3_PHY_ID_BCM5906: return "5906";
15622 case TG3_PHY_ID_BCM5761: return "5761";
15623 case TG3_PHY_ID_BCM5718C: return "5718C";
15624 case TG3_PHY_ID_BCM5718S: return "5718S";
15625 case TG3_PHY_ID_BCM57765: return "57765";
15626 case TG3_PHY_ID_BCM5719C: return "5719C";
15627 case TG3_PHY_ID_BCM5720C: return "5720C";
15628 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15629 case 0: return "serdes";
15630 default: return "unknown";
15634 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15636 if (tg3_flag(tp, PCI_EXPRESS)) {
15637 strcpy(str, "PCI Express");
15639 } else if (tg3_flag(tp, PCIX_MODE)) {
15640 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15642 strcpy(str, "PCIX:");
15644 if ((clock_ctrl == 7) ||
15645 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15646 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15647 strcat(str, "133MHz");
15648 else if (clock_ctrl == 0)
15649 strcat(str, "33MHz");
15650 else if (clock_ctrl == 2)
15651 strcat(str, "50MHz");
15652 else if (clock_ctrl == 4)
15653 strcat(str, "66MHz");
15654 else if (clock_ctrl == 6)
15655 strcat(str, "100MHz");
15657 strcpy(str, "PCI:");
15658 if (tg3_flag(tp, PCI_HIGH_SPEED))
15659 strcat(str, "66MHz");
15661 strcat(str, "33MHz");
15663 if (tg3_flag(tp, PCI_32BIT))
15664 strcat(str, ":32-bit");
15666 strcat(str, ":64-bit");
15670 static void __devinit tg3_init_coal(struct tg3 *tp)
15672 struct ethtool_coalesce *ec = &tp->coal;
15674 memset(ec, 0, sizeof(*ec));
15675 ec->cmd = ETHTOOL_GCOALESCE;
15676 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15677 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15678 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15679 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15680 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15681 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15682 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15683 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15684 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15686 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15687 HOSTCC_MODE_CLRTICK_TXBD)) {
15688 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15689 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15690 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15691 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15694 if (tg3_flag(tp, 5705_PLUS)) {
15695 ec->rx_coalesce_usecs_irq = 0;
15696 ec->tx_coalesce_usecs_irq = 0;
15697 ec->stats_block_coalesce_usecs = 0;
15701 static int __devinit tg3_init_one(struct pci_dev *pdev,
15702 const struct pci_device_id *ent)
15704 struct net_device *dev;
15706 int i, err, pm_cap;
15707 u32 sndmbx, rcvmbx, intmbx;
15709 u64 dma_mask, persist_dma_mask;
15710 netdev_features_t features = 0;
15712 printk_once(KERN_INFO "%s\n", version);
15714 err = pci_enable_device(pdev);
15716 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15720 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15722 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15723 goto err_out_disable_pdev;
15726 pci_set_master(pdev);
15728 /* Find power-management capability. */
15729 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15731 dev_err(&pdev->dev,
15732 "Cannot find Power Management capability, aborting\n");
15734 goto err_out_free_res;
15737 err = pci_set_power_state(pdev, PCI_D0);
15739 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15740 goto err_out_free_res;
15743 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15746 goto err_out_power_down;
15749 SET_NETDEV_DEV(dev, &pdev->dev);
15751 tp = netdev_priv(dev);
15754 tp->pm_cap = pm_cap;
15755 tp->rx_mode = TG3_DEF_RX_MODE;
15756 tp->tx_mode = TG3_DEF_TX_MODE;
15759 tp->msg_enable = tg3_debug;
15761 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15763 /* The word/byte swap controls here control register access byte
15764 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15767 tp->misc_host_ctrl =
15768 MISC_HOST_CTRL_MASK_PCI_INT |
15769 MISC_HOST_CTRL_WORD_SWAP |
15770 MISC_HOST_CTRL_INDIR_ACCESS |
15771 MISC_HOST_CTRL_PCISTATE_RW;
15773 /* The NONFRM (non-frame) byte/word swap controls take effect
15774 * on descriptor entries, anything which isn't packet data.
15776 * The StrongARM chips on the board (one for tx, one for rx)
15777 * are running in big-endian mode.
15779 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15780 GRC_MODE_WSWAP_NONFRM_DATA);
15781 #ifdef __BIG_ENDIAN
15782 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15784 spin_lock_init(&tp->lock);
15785 spin_lock_init(&tp->indirect_lock);
15786 INIT_WORK(&tp->reset_task, tg3_reset_task);
15788 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15790 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15792 goto err_out_free_dev;
15795 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15796 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15797 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15798 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15799 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15800 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15801 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15802 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15803 tg3_flag_set(tp, ENABLE_APE);
15804 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15805 if (!tp->aperegs) {
15806 dev_err(&pdev->dev,
15807 "Cannot map APE registers, aborting\n");
15809 goto err_out_iounmap;
15813 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15814 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15816 dev->ethtool_ops = &tg3_ethtool_ops;
15817 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15818 dev->netdev_ops = &tg3_netdev_ops;
15819 dev->irq = pdev->irq;
15821 err = tg3_get_invariants(tp);
15823 dev_err(&pdev->dev,
15824 "Problem fetching invariants of chip, aborting\n");
15825 goto err_out_apeunmap;
15828 /* The EPB bridge inside 5714, 5715, and 5780 and any
15829 * device behind the EPB cannot support DMA addresses > 40-bit.
15830 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15831 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15832 * do DMA address check in tg3_start_xmit().
15834 if (tg3_flag(tp, IS_5788))
15835 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15836 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15837 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15838 #ifdef CONFIG_HIGHMEM
15839 dma_mask = DMA_BIT_MASK(64);
15842 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15844 /* Configure DMA attributes. */
15845 if (dma_mask > DMA_BIT_MASK(32)) {
15846 err = pci_set_dma_mask(pdev, dma_mask);
15848 features |= NETIF_F_HIGHDMA;
15849 err = pci_set_consistent_dma_mask(pdev,
15852 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15853 "DMA for consistent allocations\n");
15854 goto err_out_apeunmap;
15858 if (err || dma_mask == DMA_BIT_MASK(32)) {
15859 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15861 dev_err(&pdev->dev,
15862 "No usable DMA configuration, aborting\n");
15863 goto err_out_apeunmap;
15867 tg3_init_bufmgr_config(tp);
15869 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15871 /* 5700 B0 chips do not support checksumming correctly due
15872 * to hardware bugs.
15874 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15875 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15877 if (tg3_flag(tp, 5755_PLUS))
15878 features |= NETIF_F_IPV6_CSUM;
15881 /* TSO is on by default on chips that support hardware TSO.
15882 * Firmware TSO on older chips gives lower performance, so it
15883 * is off by default, but can be enabled using ethtool.
15885 if ((tg3_flag(tp, HW_TSO_1) ||
15886 tg3_flag(tp, HW_TSO_2) ||
15887 tg3_flag(tp, HW_TSO_3)) &&
15888 (features & NETIF_F_IP_CSUM))
15889 features |= NETIF_F_TSO;
15890 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15891 if (features & NETIF_F_IPV6_CSUM)
15892 features |= NETIF_F_TSO6;
15893 if (tg3_flag(tp, HW_TSO_3) ||
15894 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15895 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15896 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15898 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15899 features |= NETIF_F_TSO_ECN;
15902 dev->features |= features;
15903 dev->vlan_features |= features;
15906 * Add loopback capability only for a subset of devices that support
15907 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15908 * loopback for the remaining devices.
15910 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15911 !tg3_flag(tp, CPMU_PRESENT))
15912 /* Add the loopback capability */
15913 features |= NETIF_F_LOOPBACK;
15915 dev->hw_features |= features;
15917 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15918 !tg3_flag(tp, TSO_CAPABLE) &&
15919 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15920 tg3_flag_set(tp, MAX_RXPEND_64);
15921 tp->rx_pending = 63;
15924 err = tg3_get_device_address(tp);
15926 dev_err(&pdev->dev,
15927 "Could not obtain valid ethernet address, aborting\n");
15928 goto err_out_apeunmap;
15932 * Reset chip in case UNDI or EFI driver did not shutdown
15933 * DMA self test will enable WDMAC and we'll see (spurious)
15934 * pending DMA on the PCI bus at that point.
15936 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15937 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15938 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15939 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15942 err = tg3_test_dma(tp);
15944 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15945 goto err_out_apeunmap;
15948 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15949 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15950 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15951 for (i = 0; i < tp->irq_max; i++) {
15952 struct tg3_napi *tnapi = &tp->napi[i];
15955 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15957 tnapi->int_mbox = intmbx;
15963 tnapi->consmbox = rcvmbx;
15964 tnapi->prodmbox = sndmbx;
15967 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15969 tnapi->coal_now = HOSTCC_MODE_NOW;
15971 if (!tg3_flag(tp, SUPPORT_MSIX))
15975 * If we support MSIX, we'll be using RSS. If we're using
15976 * RSS, the first vector only handles link interrupts and the
15977 * remaining vectors handle rx and tx interrupts. Reuse the
15978 * mailbox values for the next iteration. The values we setup
15979 * above are still useful for the single vectored mode.
15994 pci_set_drvdata(pdev, dev);
15996 if (tg3_flag(tp, 5717_PLUS)) {
15997 /* Resume a low-power mode */
15998 tg3_frob_aux_power(tp, false);
16001 tg3_timer_init(tp);
16003 err = register_netdev(dev);
16005 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16006 goto err_out_apeunmap;
16009 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16010 tp->board_part_number,
16011 tp->pci_chip_rev_id,
16012 tg3_bus_string(tp, str),
16015 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16016 struct phy_device *phydev;
16017 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16019 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16020 phydev->drv->name, dev_name(&phydev->dev));
16024 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16025 ethtype = "10/100Base-TX";
16026 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16027 ethtype = "1000Base-SX";
16029 ethtype = "10/100/1000Base-T";
16031 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16032 "(WireSpeed[%d], EEE[%d])\n",
16033 tg3_phy_string(tp), ethtype,
16034 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16035 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16038 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16039 (dev->features & NETIF_F_RXCSUM) != 0,
16040 tg3_flag(tp, USE_LINKCHG_REG) != 0,
16041 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16042 tg3_flag(tp, ENABLE_ASF) != 0,
16043 tg3_flag(tp, TSO_CAPABLE) != 0);
16044 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16046 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16047 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16049 pci_save_state(pdev);
16055 iounmap(tp->aperegs);
16056 tp->aperegs = NULL;
16068 err_out_power_down:
16069 pci_set_power_state(pdev, PCI_D3hot);
16072 pci_release_regions(pdev);
16074 err_out_disable_pdev:
16075 pci_disable_device(pdev);
16076 pci_set_drvdata(pdev, NULL);
16080 static void __devexit tg3_remove_one(struct pci_dev *pdev)
16082 struct net_device *dev = pci_get_drvdata(pdev);
16085 struct tg3 *tp = netdev_priv(dev);
16087 release_firmware(tp->fw);
16089 tg3_reset_task_cancel(tp);
16091 if (tg3_flag(tp, USE_PHYLIB)) {
16096 unregister_netdev(dev);
16098 iounmap(tp->aperegs);
16099 tp->aperegs = NULL;
16106 pci_release_regions(pdev);
16107 pci_disable_device(pdev);
16108 pci_set_drvdata(pdev, NULL);
16112 #ifdef CONFIG_PM_SLEEP
16113 static int tg3_suspend(struct device *device)
16115 struct pci_dev *pdev = to_pci_dev(device);
16116 struct net_device *dev = pci_get_drvdata(pdev);
16117 struct tg3 *tp = netdev_priv(dev);
16120 if (!netif_running(dev))
16123 tg3_reset_task_cancel(tp);
16125 tg3_netif_stop(tp);
16127 tg3_timer_stop(tp);
16129 tg3_full_lock(tp, 1);
16130 tg3_disable_ints(tp);
16131 tg3_full_unlock(tp);
16133 netif_device_detach(dev);
16135 tg3_full_lock(tp, 0);
16136 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16137 tg3_flag_clear(tp, INIT_COMPLETE);
16138 tg3_full_unlock(tp);
16140 err = tg3_power_down_prepare(tp);
16144 tg3_full_lock(tp, 0);
16146 tg3_flag_set(tp, INIT_COMPLETE);
16147 err2 = tg3_restart_hw(tp, 1);
16151 tg3_timer_start(tp);
16153 netif_device_attach(dev);
16154 tg3_netif_start(tp);
16157 tg3_full_unlock(tp);
16166 static int tg3_resume(struct device *device)
16168 struct pci_dev *pdev = to_pci_dev(device);
16169 struct net_device *dev = pci_get_drvdata(pdev);
16170 struct tg3 *tp = netdev_priv(dev);
16173 if (!netif_running(dev))
16176 netif_device_attach(dev);
16178 tg3_full_lock(tp, 0);
16180 tg3_flag_set(tp, INIT_COMPLETE);
16181 err = tg3_restart_hw(tp, 1);
16185 tg3_timer_start(tp);
16187 tg3_netif_start(tp);
16190 tg3_full_unlock(tp);
16198 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16199 #define TG3_PM_OPS (&tg3_pm_ops)
16203 #define TG3_PM_OPS NULL
16205 #endif /* CONFIG_PM_SLEEP */
16208 * tg3_io_error_detected - called when PCI error is detected
16209 * @pdev: Pointer to PCI device
16210 * @state: The current pci connection state
16212 * This function is called after a PCI bus error affecting
16213 * this device has been detected.
16215 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16216 pci_channel_state_t state)
16218 struct net_device *netdev = pci_get_drvdata(pdev);
16219 struct tg3 *tp = netdev_priv(netdev);
16220 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16222 netdev_info(netdev, "PCI I/O error detected\n");
16226 if (!netif_running(netdev))
16231 tg3_netif_stop(tp);
16233 tg3_timer_stop(tp);
16235 /* Want to make sure that the reset task doesn't run */
16236 tg3_reset_task_cancel(tp);
16238 netif_device_detach(netdev);
16240 /* Clean up software state, even if MMIO is blocked */
16241 tg3_full_lock(tp, 0);
16242 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16243 tg3_full_unlock(tp);
16246 if (state == pci_channel_io_perm_failure)
16247 err = PCI_ERS_RESULT_DISCONNECT;
16249 pci_disable_device(pdev);
16257 * tg3_io_slot_reset - called after the pci bus has been reset.
16258 * @pdev: Pointer to PCI device
16260 * Restart the card from scratch, as if from a cold-boot.
16261 * At this point, the card has exprienced a hard reset,
16262 * followed by fixups by BIOS, and has its config space
16263 * set up identically to what it was at cold boot.
16265 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16267 struct net_device *netdev = pci_get_drvdata(pdev);
16268 struct tg3 *tp = netdev_priv(netdev);
16269 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16274 if (pci_enable_device(pdev)) {
16275 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16279 pci_set_master(pdev);
16280 pci_restore_state(pdev);
16281 pci_save_state(pdev);
16283 if (!netif_running(netdev)) {
16284 rc = PCI_ERS_RESULT_RECOVERED;
16288 err = tg3_power_up(tp);
16292 rc = PCI_ERS_RESULT_RECOVERED;
16301 * tg3_io_resume - called when traffic can start flowing again.
16302 * @pdev: Pointer to PCI device
16304 * This callback is called when the error recovery driver tells
16305 * us that its OK to resume normal operation.
16307 static void tg3_io_resume(struct pci_dev *pdev)
16309 struct net_device *netdev = pci_get_drvdata(pdev);
16310 struct tg3 *tp = netdev_priv(netdev);
16315 if (!netif_running(netdev))
16318 tg3_full_lock(tp, 0);
16319 tg3_flag_set(tp, INIT_COMPLETE);
16320 err = tg3_restart_hw(tp, 1);
16321 tg3_full_unlock(tp);
16323 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16327 netif_device_attach(netdev);
16329 tg3_timer_start(tp);
16331 tg3_netif_start(tp);
16339 static struct pci_error_handlers tg3_err_handler = {
16340 .error_detected = tg3_io_error_detected,
16341 .slot_reset = tg3_io_slot_reset,
16342 .resume = tg3_io_resume
16345 static struct pci_driver tg3_driver = {
16346 .name = DRV_MODULE_NAME,
16347 .id_table = tg3_pci_tbl,
16348 .probe = tg3_init_one,
16349 .remove = __devexit_p(tg3_remove_one),
16350 .err_handler = &tg3_err_handler,
16351 .driver.pm = TG3_PM_OPS,
16354 static int __init tg3_init(void)
16356 return pci_register_driver(&tg3_driver);
16359 static void __exit tg3_cleanup(void)
16361 pci_unregister_driver(&tg3_driver);
16364 module_init(tg3_init);
16365 module_exit(tg3_cleanup);