/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>

#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)				\
	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)				\
	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)			\
	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
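
/* Usage sketch (illustrative, not from the original file): call sites pass
 * the short flag name and the macros paste on the TG3_FLAG_ prefix before
 * touching the (tp)->tg3_flags bitmap:
 *
 *	if (!tg3_flag(tp, ENABLE_APE))
 *		return 0;
 *	tg3_flag_set(tp, MDIOBUS_INITED);
 *	tg3_flag_clear(tp, MDIOBUS_INITED);
 */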

#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			135
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"Nov 14, 2013"

#define RESET_KIND_SHUTDOWN	0
#define RESET_KIND_INIT		1
#define RESET_KIND_SUSPEND	2

#define TG3_DEF_RX_MODE		0
#define TG3_DEF_TX_MODE		0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY	100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT			(5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU			60
#define TG3_MAX_MTU(tp)	\
	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING		200
#define TG3_RX_JMB_RING_SIZE(tp) \
	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING	100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions. Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE		512
#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
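
/* Illustrative note (not in the original source): NEXT_TX() is exactly the
 * '& (foo - 1)' rewrite described above.  Because TG3_TX_RING_SIZE is a
 * power of two, for any index N
 *
 *	((N) + 1) & (TG3_TX_RING_SIZE - 1)  ==  ((N) + 1) % TG3_TX_RING_SIZE
 *
 * so a ring walk such as
 *
 *	entry = NEXT_TX(entry);
 *
 * compiles to an add and an AND rather than a hardware modulo.
 */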

#define TG3_DMA_BYTE_ENAB		64

#define TG3_RX_STD_DMA_SZ		1536
#define TG3_RX_JMB_DMA_SZ		9046

#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode. The driver
 * works around this bug by double copying the packet. This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient. For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path. Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD		256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
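
/* Sketch of how the copy threshold is consumed (simplified, assuming the
 * tg3_rx() receive path): packets no longer than the threshold are copied
 * into a fresh skb so the mapped DMA buffer can be recycled in place,
 * while larger packets hand the buffer itself up the stack:
 *
 *	if (len > TG3_RX_COPY_THRESH(tp)) {
 *		... unmap the buffer and attach it to the skb ...
 *	} else {
 *		... memcpy() the payload into a small new skb ...
 *	}
 */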

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K		2048
#define TG3_TX_BD_DMA_MAX_4K		4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC	5
#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3		"tigon/tg3.bin"
#define FIRMWARE_TG357766	"tigon/tg357766.bin"
#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
			TG3_DRV_DATA_FLAG_5705_10_100},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
			PCI_VENDOR_ID_LENOVO,
			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_flow_control" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST		0
#define TG3_LINK_TEST		1
#define TG3_REGISTER_TEST	2
#define TG3_MEMORY_TEST		3
#define TG3_MAC_LOOPB_TEST	4
#define TG3_PHY_LOOPB_TEST	5
#define TG3_EXT_LOOPB_TEST	6
#define TG3_INTERRUPT_TEST	7

static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	[TG3_NVRAM_TEST]	= { "nvram test (online) " },
	[TG3_LINK_TEST]		= { "link test (online) " },
	[TG3_REGISTER_TEST]	= { "register test (offline)" },
	[TG3_MEMORY_TEST]	= { "memory test (offline)" },
	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
	[TG3_INTERRUPT_TEST]	= { "interrupt test (offline)" },
};

#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)

static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (tp->grc_mode & GRC_MODE_IRQ_ON_MAC_ATTN))
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
	     !tg3_flag(tp, ICH_WORKAROUND)))
		tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
	    tg3_flag(tp, FLUSH_POSTED_WRITES))
		readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

#define tw32(reg, val)			tp->write32(tp, reg, val)
#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)			tp->read32(tp, reg)
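
/* Usage sketch (illustrative): the waiting flavor exists for the registers
 * named in the comment above _tw32_flush(), e.g. giving a GPIO power
 * switch time to settle after toggling GRC_LOCAL_CTRL:
 *
 *	tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *		    TG3_GRC_LCLCTL_PWRSW_DELAY);
 */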

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
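
/* Usage sketch: higher-level code reaches NIC SRAM through these helpers;
 * tg3_poll_fw() below, for example, polls the firmware mailbox this way:
 *
 *	tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 *	if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 *		...firmware has finished initializing...
 */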

static void tg3_ape_lock_init(struct tg3 *tp)
{
	int i;
	u32 regbase, bit;

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		regbase = TG3_APE_LOCK_GRANT;
	else
		regbase = TG3_APE_PER_LOCK_GRANT;

	/* Make sure the driver hasn't any stale locks. */
	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
		switch (i) {
		case TG3_APE_LOCK_PHY0:
		case TG3_APE_LOCK_PHY1:
		case TG3_APE_LOCK_PHY2:
		case TG3_APE_LOCK_PHY3:
			bit = APE_LOCK_GRANT_DRIVER;
			break;
		default:
			if (!tp->pci_fn)
				bit = APE_LOCK_GRANT_DRIVER;
			else
				bit = 1 << tp->pci_fn;
		}
		tg3_ape_write32(tp, regbase + 4 * i, bit);
	}
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return 0;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_REQ_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_REQ_DRIVER;
		break;
	default:
		return -EINVAL;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, bit);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == bit)
			break;
		if (pci_channel_offline(tp->pdev))
			break;
		udelay(10);
	}

	if (status != bit) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off, bit);
		ret = -EBUSY;
	}

	return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
	u32 gnt, bit;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (locknum) {
	case TG3_APE_LOCK_GPIO:
		if (tg3_asic_rev(tp) == ASIC_REV_5761)
			return;
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		if (!tp->pci_fn)
			bit = APE_LOCK_GRANT_DRIVER;
		else
			bit = 1 << tp->pci_fn;
		break;
	case TG3_APE_LOCK_PHY0:
	case TG3_APE_LOCK_PHY1:
	case TG3_APE_LOCK_PHY2:
	case TG3_APE_LOCK_PHY3:
		bit = APE_LOCK_GRANT_DRIVER;
		break;
	default:
		return;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5761)
		gnt = TG3_APE_LOCK_GRANT;
	else
		gnt = TG3_APE_PER_LOCK_GRANT;

	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}
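
/* Usage sketch: accesses to APE-shared resources are bracketed by
 * lock/unlock, with the caller bailing out if the firmware never grants
 * the bit, as tg3_ape_event_lock() below does:
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return -EBUSY;
 *	...touch APE shared memory...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 */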

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
	u32 apedata;

	while (timeout_us) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return -EBUSY;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		udelay(10);
		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
	}

	return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
	u32 i, apedata;

	for (i = 0; i < timeout_us / 10; i++) {
		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;
		udelay(10);
	}

	return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
				   u32 len)
{
	int err;
	u32 i, bufoff, msgoff, maxlen, apedata;

	if (!tg3_flag(tp, APE_HAS_NCSI))
		return 0;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -ENODEV;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
		 TG3_APE_SHMEM_BASE;
	msgoff = bufoff + 2 * sizeof(u32);
	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

	while (len) {
		u32 length;

		/* Cap xfer sizes to scratchpad limits. */
		length = (len > maxlen) ? maxlen : len;
		len -= length;

		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
		if (!(apedata & APE_FW_STATUS_READY))
			return -EAGAIN;

		/* Wait for up to 1 msec for APE to service previous event. */
		err = tg3_ape_event_lock(tp, 1000);
		if (err)
			return err;

		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
			  APE_EVENT_STATUS_SCRTCHPD_READ |
			  APE_EVENT_STATUS_EVENT_PENDING;
		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

		tg3_ape_write32(tp, bufoff, base_off);
		tg3_ape_write32(tp, bufoff + sizeof(u32), length);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

		base_off += length;

		if (tg3_ape_wait_for_event(tp, 30000))
			return -EAGAIN;

		for (i = 0; length; i += 4, length -= 4) {
			u32 val = tg3_ape_read32(tp, msgoff + i);
			memcpy(data, &val, sizeof(u32));
			data++;
		}
	}

	return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int err;
	u32 apedata;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return -EAGAIN;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return -EAGAIN;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	err = tg3_ape_event_lock(tp, 1000);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
			event | APE_EVENT_STATUS_EVENT_PENDING);

	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

	return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state. Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_hw_status *sblk = tnapi->hw_status;
	unsigned int work_exists = 0;

	/* check for phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		if (sblk->status & SD_STATUS_LINK_CHG)
			work_exists = 1;
	}

	/* check for TX work to do */
	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
		work_exists = 1;

	/* check for RX work to do */
	if (tnapi->rx_rcb_prod_idx &&
	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_exists = 1;

	return work_exists;
}

/* tg3_int_reenable
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS	5000

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
			 u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	*val = 0x0;

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		ret = 0;
		*val = frame_val & MI_COM_DATA_MASK;
	}

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
			  u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		       (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	tg3_ape_lock(tp, tp->phy_ape_lock);

	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	tg3_ape_unlock(tp, tp->phy_ape_lock);

	return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
	if (err)
		goto done;

	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
	if (err)
		goto done;

	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
	return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
	if (!err)
		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

	return err;
}
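
/* Usage sketch (the register pick is illustrative, assuming the
 * MII_TG3_DSP_EXP8 definitions from tg3.h): the DSP space is windowed, so
 * a read-modify-write is two paired address/data accesses:
 *
 *	if (!tg3_phydsp_read(tp, MII_TG3_DSP_EXP8, &val))
 *		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8,
 *				 val | MII_TG3_DSP_EXP8_REJ2MHz);
 */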

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
	int err;

	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
			   MII_TG3_AUXCTL_SHDWSEL_MISC);
	if (!err)
		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

	return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
		set |= MII_TG3_AUXCTL_MISC_WREN;

	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
	u32 val;
	int err;

	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

	if (err)
		return err;

	if (enable)
		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
	else
		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);

	return err;
}

static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
{
	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
			    reg | val | MII_TG3_MISC_SHDW_WREN);
}

static int tg3_bmcr_reset(struct tg3 *tp)
{
	u32 phy_control;
	int limit, err;

	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
	phy_control = BMCR_RESET;
	err = tg3_writephy(tp, MII_BMCR, phy_control);
	if (err != 0)
		return -EBUSY;

	limit = 5000;
	while (limit--) {
		err = tg3_readphy(tp, MII_BMCR, &phy_control);
		if (err != 0)
			return -EBUSY;

		if ((phy_control & BMCR_RESET) == 0) {
			udelay(40);
			break;
		}
		udelay(10);
	}
	if (limit < 0)
		return -EBUSY;

	return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
	struct tg3 *tp = bp->priv;
	u32 val;

	spin_lock_bh(&tp->lock);

	if (__tg3_readphy(tp, mii_id, reg, &val))
		val = -EIO;

	spin_unlock_bh(&tp->lock);

	return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
	struct tg3 *tp = bp->priv;
	u32 ret = 0;

	spin_lock_bh(&tp->lock);

	if (__tg3_writephy(tp, mii_id, reg, val))
		ret = -EIO;

	spin_unlock_bh(&tp->lock);

	return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = tp->pci_fn + 1;

		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
		int addr;

		addr = ssb_gige_get_phyaddr(tp->pdev);
		if (addr < 0)
			return addr;
		tp->phy_addr = addr;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fall through */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (tg3_asic_rev(tp) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}

static void tg3_mdio_fini(struct tg3 *tp)
{
	if (tg3_flag(tp, MDIOBUS_INITED)) {
		tg3_flag_clear(tp, MDIOBUS_INITED);
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
	}
}

/* tp->lock is held. */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}

#define TG3_FW_EVENT_TIMEOUT_USEC 2500

/* tp->lock is held. */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		if (pci_channel_offline(tp->pdev))
			break;

		udelay(8);
	}
}

/* tp->lock is held. */
static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
{
	u32 reg, val;

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	*data++ = val;

	val = 0;
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	*data++ = val;

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	*data++ = val;
}

/* tp->lock is held. */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 data[4];

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_phy_gather_ump_data(tp, data);

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);

	tg3_generate_fw_event(tp);
}

/* tp->lock is held. */
static void tg3_stop_fw(struct tg3 *tp)
{
	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
		/* Wait for RX cpu to ACK the previous event. */
		tg3_wait_for_event_ack(tp);

		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);

		tg3_generate_fw_event(tp);

		/* Wait for RX cpu to ACK this event. */
		tg3_wait_for_event_ack(tp);
	}
}

/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);

	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START_DONE);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD_DONE);
			break;

		default:
			break;
		}
	}
}

/* tp->lock is held. */
static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
{
	if (tg3_flag(tp, ENABLE_ASF)) {
		switch (kind) {
		case RESET_KIND_INIT:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_START);
			break;

		case RESET_KIND_SHUTDOWN:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_UNLOAD);
			break;

		case RESET_KIND_SUSPEND:
			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
				      DRV_STATE_SUSPEND);
			break;

		default:
			break;
		}
	}
}

static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (tg3_flag(tp, NO_FWARE_REPORTED))
		return 0;

	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* We don't use firmware. */
		return 0;
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			if (pci_channel_offline(tp->pdev))
				return -ENODEV;

			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		if (pci_channel_offline(tp->pdev)) {
			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
				tg3_flag_set(tp, NO_FWARE_REPORTED);
				netdev_info(tp->dev, "No firmware running\n");
			}

			break;
		}

		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}

static void tg3_link_report(struct tg3 *tp)
{
	if (!netif_carrier_ok(tp->dev)) {
		netif_info(tp, link, tp->dev, "Link is down\n");
		tg3_ump_link_report(tp);
	} else if (netif_msg_link(tp)) {
		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
			    (tp->link_config.active_speed == SPEED_1000 ?
			     1000 :
			     (tp->link_config.active_speed == SPEED_100 ?
			      100 : 10)),
			    (tp->link_config.active_duplex == DUPLEX_FULL ?
			     "full" : "half"));

		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
			    "on" : "off",
			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
			    "on" : "off");

		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
			netdev_info(tp->dev, "EEE is %s\n",
				    tp->setlpicnt ? "enabled" : "disabled");

		tg3_ump_link_report(tp);
	}

	tp->link_up = netif_carrier_ok(tp->dev);
}

static u32 tg3_decode_flowctrl_1000T(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_PAUSE_CAP) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_PAUSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_PAUSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
	u16 miireg;

	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
		miireg = ADVERTISE_1000XPAUSE;
	else if (flow_ctrl & FLOW_CTRL_TX)
		miireg = ADVERTISE_1000XPSE_ASYM;
	else if (flow_ctrl & FLOW_CTRL_RX)
		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
	else
		miireg = 0;

	return miireg;
}

static u32 tg3_decode_flowctrl_1000X(u32 adv)
{
	u32 flowctrl = 0;

	if (adv & ADVERTISE_1000XPAUSE) {
		flowctrl |= FLOW_CTRL_RX;
		if (!(adv & ADVERTISE_1000XPSE_ASYM))
			flowctrl |= FLOW_CTRL_TX;
	} else if (adv & ADVERTISE_1000XPSE_ASYM)
		flowctrl |= FLOW_CTRL_TX;

	return flowctrl;
}

static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
	u8 cap = 0;

	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
		if (lcladv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_RX;
		if (rmtadv & ADVERTISE_1000XPAUSE)
			cap = FLOW_CTRL_TX;
	}

	return cap;
}
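
/* Resolution implemented above for 1000BASE-X pause autoneg (cf. IEEE
 * 802.3 Annex 28B); PAUSE/ASYM are the two advertised bits on each side:
 *
 *	local		remote		resolved flow control
 *	PAUSE		PAUSE		FLOW_CTRL_TX | FLOW_CTRL_RX
 *	PAUSE+ASYM	ASYM		FLOW_CTRL_RX
 *	ASYM		PAUSE+ASYM	FLOW_CTRL_TX
 *	anything else			none
 */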

static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
{
	u8 autoneg;
	u8 flowctrl = 0;
	u32 old_rx_mode = tp->rx_mode;
	u32 old_tx_mode = tp->tx_mode;

	if (tg3_flag(tp, USE_PHYLIB))
		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
	else
		autoneg = tp->link_config.autoneg;

	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
		else
			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	} else
		flowctrl = tp->link_config.flowctrl;

	tp->link_config.active_flowctrl = flowctrl;

	if (flowctrl & FLOW_CTRL_RX)
		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;

	if (old_rx_mode != tp->rx_mode)
		tw32_f(MAC_RX_MODE, tp->rx_mode);

	if (flowctrl & FLOW_CTRL_TX)
		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
	else
		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;

	if (old_tx_mode != tp->tx_mode)
		tw32_f(MAC_TX_MODE, tp->tx_mode);
}

static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 tg3_asic_rev(tp) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = mii_advertise_flowctrl(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	if (phydev->link != tp->old_link ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->old_link = phydev->link;
	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}

static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
			     tg3_adjust_link, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fall through */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}

static void tg3_phy_start(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phydev = tp->mdio_bus->phy_map[tp->phy_addr];

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
		phydev->speed = tp->link_config.speed;
		phydev->duplex = tp->link_config.duplex;
		phydev->autoneg = tp->link_config.autoneg;
		phydev->advertising = tp->link_config.advertising;
	}

	phy_start(phydev);

	phy_start_aneg(phydev);
}

static void tg3_phy_stop(struct tg3 *tp)
{
	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
		return;

	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
}

static void tg3_phy_fini(struct tg3 *tp)
{
	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
	}
}

static int tg3_phy_set_extloopbk(struct tg3 *tp)
{
	int err;
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
		return 0;

	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		err = tg3_phy_auxctl_write(tp,
					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
					   0x4c20);
		goto done;
	}

	err = tg3_phy_auxctl_read(tp,
				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
	if (err)
		return err;

	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
	err = tg3_phy_auxctl_write(tp,
				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);

done:
	return err;
}

static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 phytest;

	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
		u32 phy;

		tg3_writephy(tp, MII_TG3_FET_TEST,
			     phytest | MII_TG3_FET_SHADOW_EN);
		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
			if (enable)
				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
			else
				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
		}
		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
	}
}

static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);

	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
}
2256 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2260 if (!tg3_flag(tp, 5705_PLUS) ||
2261 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2264 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2267 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2268 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2270 tg3_writephy(tp, MII_TG3_FET_TEST,
2271 ephy | MII_TG3_FET_SHADOW_EN);
2272 if (!tg3_readphy(tp, reg, &phy)) {
2273 if (enable)
2274 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2275 else
2276 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2277 tg3_writephy(tp, reg, phy);
2279 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2284 ret = tg3_phy_auxctl_read(tp,
2285 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2287 if (enable)
2288 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2289 else
2290 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2291 tg3_phy_auxctl_write(tp,
2292 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2297 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2302 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2303 return;
2305 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2306 if (!ret)
2307 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2308 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2311 static void tg3_phy_apply_otp(struct tg3 *tp)
2320 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2323 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2324 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2325 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2327 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2328 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2331 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2332 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2333 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2335 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2336 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2338 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2341 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2342 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2343 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2345 tg3_phy_toggle_auxctl_smdsp(tp, false);
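/* Each write above follows the same recipe: isolate one OTP field with
 * its mask, shift it down into register position, OR in any required
 * default bits, and push the result at the matching DSP tap via
 * tg3_phydsp_write().
 */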
2348 static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2351 struct ethtool_eee *dest = &tp->eee;
2353 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2354 return;
2359 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2360 return;
2362 /* Pull eee_active */
2363 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2364 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2365 dest->eee_active = 1;
2366 } else {
2367 dest->eee_active = 0;
2369 /* Pull lp advertised settings */
2370 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2371 return;
2372 dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2374 /* Pull advertised and eee_enabled settings */
2375 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2376 return;
2377 dest->eee_enabled = !!val;
2378 dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2380 /* Pull tx_lpi_enabled */
2381 val = tr32(TG3_CPMU_EEE_MODE);
2382 dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2384 /* Pull lpi timer value */
2385 dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2388 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2392 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2393 return;
2397 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2398 current_link_up &&
2399 tp->link_config.active_duplex == DUPLEX_FULL &&
2400 (tp->link_config.active_speed == SPEED_100 ||
2401 tp->link_config.active_speed == SPEED_1000)) {
2404 if (tp->link_config.active_speed == SPEED_1000)
2405 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2406 else
2407 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2409 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2411 tg3_eee_pull_config(tp, NULL);
2412 if (tp->eee.eee_active)
2413 tp->setlpicnt = 2;
2416 if (!tp->setlpicnt) {
2417 if (current_link_up &&
2418 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2419 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2420 tg3_phy_toggle_auxctl_smdsp(tp, false);
2423 val = tr32(TG3_CPMU_EEE_MODE);
2424 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2428 static void tg3_phy_eee_enable(struct tg3 *tp)
2432 if (tp->link_config.active_speed == SPEED_1000 &&
2433 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2434 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2435 tg3_flag(tp, 57765_CLASS)) &&
2436 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2437 val = MII_TG3_DSP_TAP26_ALNOKO |
2438 MII_TG3_DSP_TAP26_RMRXSTO;
2439 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2440 tg3_phy_toggle_auxctl_smdsp(tp, false);
2443 val = tr32(TG3_CPMU_EEE_MODE);
2444 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2447 static int tg3_wait_macro_done(struct tg3 *tp)
2454 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2455 if ((tmp32 & 0x1000) == 0)
2465 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2467 static const u32 test_pat[4][6] = {
2468 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2469 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2470 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2471 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
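/* Each of the four channels is loaded with its six test words through
 * MII_TG3_DSP_RW_PORT and then read back below; a mismatch marks the
 * channel bad and asks the caller (via *resetp) for another PHY reset.
 */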
2475 for (chan = 0; chan < 4; chan++) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2479 (chan * 0x2000) | 0x0200);
2480 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2482 for (i = 0; i < 6; i++)
2483 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2486 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2487 if (tg3_wait_macro_done(tp)) {
2492 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2493 (chan * 0x2000) | 0x0200);
2494 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2495 if (tg3_wait_macro_done(tp)) {
2500 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2501 if (tg3_wait_macro_done(tp)) {
2506 for (i = 0; i < 6; i += 2) {
2509 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2510 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2511 tg3_wait_macro_done(tp)) {
2517 if (low != test_pat[chan][i] ||
2518 high != test_pat[chan][i+1]) {
2519 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2520 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2521 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2531 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2535 for (chan = 0; chan < 4; chan++) {
2538 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2539 (chan * 0x2000) | 0x0200);
2540 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2541 for (i = 0; i < 6; i++)
2542 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2543 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2544 if (tg3_wait_macro_done(tp))
2551 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2553 u32 reg32, phy9_orig;
2554 int retries, do_phy_reset, err;
2560 err = tg3_bmcr_reset(tp);
2566 /* Disable transmitter and interrupt. */
2567 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2568 return err;
2570 reg32 |= 0x3000;
2571 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2573 /* Set full-duplex, 1000 mbps. */
2574 tg3_writephy(tp, MII_BMCR,
2575 BMCR_FULLDPLX | BMCR_SPEED1000);
2577 /* Set to master mode. */
2578 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2581 tg3_writephy(tp, MII_CTRL1000,
2582 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2584 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2588 /* Block the PHY control access. */
2589 tg3_phydsp_write(tp, 0x8005, 0x0800);
2591 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2594 } while (--retries);
2596 err = tg3_phy_reset_chanpat(tp);
2600 tg3_phydsp_write(tp, 0x8005, 0x0000);
2602 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2603 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2605 tg3_phy_toggle_auxctl_smdsp(tp, false);
2607 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2609 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2610 reg32 &= ~0x3000;
2611 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2618 static void tg3_carrier_off(struct tg3 *tp)
2620 netif_carrier_off(tp->dev);
2621 tp->link_up = false;
2624 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2626 if (tg3_flag(tp, ENABLE_ASF))
2627 netdev_warn(tp->dev,
2628 "Management side-band traffic will be interrupted during phy settings change\n");
2631 /* This will reset the tigon3 PHY if there is no valid
2632 * link unless the FORCE argument is non-zero.
2633 */
2634 static int tg3_phy_reset(struct tg3 *tp)
2639 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2640 val = tr32(GRC_MISC_CFG);
2641 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2644 err = tg3_readphy(tp, MII_BMSR, &val);
2645 err |= tg3_readphy(tp, MII_BMSR, &val);
2649 if (netif_running(tp->dev) && tp->link_up) {
2650 netif_carrier_off(tp->dev);
2651 tg3_link_report(tp);
2654 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2655 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2656 tg3_asic_rev(tp) == ASIC_REV_5705) {
2657 err = tg3_phy_reset_5703_4_5(tp);
2664 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2665 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2666 cpmuctrl = tr32(TG3_CPMU_CTRL);
2667 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2669 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2672 err = tg3_bmcr_reset(tp);
2676 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2677 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2678 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2680 tw32(TG3_CPMU_CTRL, cpmuctrl);
2683 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2684 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2685 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2686 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2687 CPMU_LSPD_1000MB_MACCLK_12_5) {
2688 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2690 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2694 if (tg3_flag(tp, 5717_PLUS) &&
2695 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2698 tg3_phy_apply_otp(tp);
2700 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2701 tg3_phy_toggle_apd(tp, true);
2703 tg3_phy_toggle_apd(tp, false);
2706 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2707 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2708 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2709 tg3_phydsp_write(tp, 0x000a, 0x0323);
2710 tg3_phy_toggle_auxctl_smdsp(tp, false);
2713 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2714 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2715 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2718 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2719 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2720 tg3_phydsp_write(tp, 0x000a, 0x310b);
2721 tg3_phydsp_write(tp, 0x201f, 0x9506);
2722 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2723 tg3_phy_toggle_auxctl_smdsp(tp, false);
2725 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2726 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2727 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2728 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2729 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2730 tg3_writephy(tp, MII_TG3_TEST1,
2731 MII_TG3_TEST1_TRIM_EN | 0x4);
2733 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2735 tg3_phy_toggle_auxctl_smdsp(tp, false);
2739 /* Set Extended packet length bit (bit 14) on all chips that */
2740 /* support jumbo frames */
2741 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2742 /* Cannot do read-modify-write on 5401 */
2743 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2744 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2745 /* Set bit 14 with read-modify-write to preserve other bits */
2746 err = tg3_phy_auxctl_read(tp,
2747 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2749 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2750 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2753 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2754 * jumbo frames transmission.
2755 */
2756 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2757 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2758 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2759 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2762 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2763 /* adjust output voltage */
2764 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2767 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2768 tg3_phydsp_write(tp, 0xffb, 0x4000);
2770 tg3_phy_toggle_automdix(tp, true);
2771 tg3_phy_set_wirespeed(tp);
2775 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2776 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2777 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2778 TG3_GPIO_MSG_NEED_VAUX)
2779 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2780 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2781 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2782 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2783 (TG3_GPIO_MSG_DRVR_PRES << 12))
2785 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2786 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2787 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2788 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2789 (TG3_GPIO_MSG_NEED_VAUX << 12))
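/* Each PCI function owns one 4-bit nibble of this shared status word:
 * the DRVR_PRES bit says a driver is bound to that function, NEED_VAUX
 * that it wants auxiliary power. Function 2, for example, reports in
 * bits 8-11 (shift = 4 * pci_fn, as used below).
 */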
2791 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2795 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2796 tg3_asic_rev(tp) == ASIC_REV_5719)
2797 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2799 status = tr32(TG3_CPMU_DRV_STATUS);
2801 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2802 status &= ~(TG3_GPIO_MSG_MASK << shift);
2803 status |= (newstat << shift);
2805 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2806 tg3_asic_rev(tp) == ASIC_REV_5719)
2807 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2809 tw32(TG3_CPMU_DRV_STATUS, status);
2811 return status >> TG3_APE_GPIO_MSG_SHIFT;
2814 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2816 if (!tg3_flag(tp, IS_NIC))
2817 return 0;
2819 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2820 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2821 tg3_asic_rev(tp) == ASIC_REV_5720) {
2822 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2825 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2827 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2828 TG3_GRC_LCLCTL_PWRSW_DELAY);
2830 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2832 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2833 TG3_GRC_LCLCTL_PWRSW_DELAY);
2839 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2843 if (!tg3_flag(tp, IS_NIC) ||
2844 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2845 tg3_asic_rev(tp) == ASIC_REV_5701)
2846 return;
2848 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2850 tw32_wait_f(GRC_LOCAL_CTRL,
2851 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2852 TG3_GRC_LCLCTL_PWRSW_DELAY);
2854 tw32_wait_f(GRC_LOCAL_CTRL,
2856 TG3_GRC_LCLCTL_PWRSW_DELAY);
2858 tw32_wait_f(GRC_LOCAL_CTRL,
2859 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2860 TG3_GRC_LCLCTL_PWRSW_DELAY);
2863 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2865 if (!tg3_flag(tp, IS_NIC))
2866 return;
2868 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2869 tg3_asic_rev(tp) == ASIC_REV_5701) {
2870 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2871 (GRC_LCLCTRL_GPIO_OE0 |
2872 GRC_LCLCTRL_GPIO_OE1 |
2873 GRC_LCLCTRL_GPIO_OE2 |
2874 GRC_LCLCTRL_GPIO_OUTPUT0 |
2875 GRC_LCLCTRL_GPIO_OUTPUT1),
2876 TG3_GRC_LCLCTL_PWRSW_DELAY);
2877 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2878 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2879 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2880 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2881 GRC_LCLCTRL_GPIO_OE1 |
2882 GRC_LCLCTRL_GPIO_OE2 |
2883 GRC_LCLCTRL_GPIO_OUTPUT0 |
2884 GRC_LCLCTRL_GPIO_OUTPUT1 |
2885 tp->grc_local_ctrl;
2886 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2887 TG3_GRC_LCLCTL_PWRSW_DELAY);
2889 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2890 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2891 TG3_GRC_LCLCTL_PWRSW_DELAY);
2893 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2894 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2895 TG3_GRC_LCLCTL_PWRSW_DELAY);
2896 } else {
2897 u32 no_gpio2;
2898 u32 grc_local_ctrl = 0;
2900 /* Workaround to prevent overdrawing Amps. */
2901 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2902 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2903 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2905 TG3_GRC_LCLCTL_PWRSW_DELAY);
2908 /* On 5753 and variants, GPIO2 cannot be used. */
2909 no_gpio2 = tp->nic_sram_data_cfg &
2910 NIC_SRAM_DATA_CFG_NO_GPIO2;
2912 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2913 GRC_LCLCTRL_GPIO_OE1 |
2914 GRC_LCLCTRL_GPIO_OE2 |
2915 GRC_LCLCTRL_GPIO_OUTPUT1 |
2916 GRC_LCLCTRL_GPIO_OUTPUT2;
2917 if (no_gpio2) {
2918 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2919 GRC_LCLCTRL_GPIO_OUTPUT2);
2921 tw32_wait_f(GRC_LOCAL_CTRL,
2922 tp->grc_local_ctrl | grc_local_ctrl,
2923 TG3_GRC_LCLCTL_PWRSW_DELAY);
2925 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2927 tw32_wait_f(GRC_LOCAL_CTRL,
2928 tp->grc_local_ctrl | grc_local_ctrl,
2929 TG3_GRC_LCLCTL_PWRSW_DELAY);
2931 if (!no_gpio2) {
2932 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2933 tw32_wait_f(GRC_LOCAL_CTRL,
2934 tp->grc_local_ctrl | grc_local_ctrl,
2935 TG3_GRC_LCLCTL_PWRSW_DELAY);
2940 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2942 u32 msg = 0;
2944 /* Serialize power state transitions */
2945 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2948 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2949 msg = TG3_GPIO_MSG_NEED_VAUX;
2951 msg = tg3_set_function_status(tp, msg);
2953 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2954 goto done;
2956 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2957 tg3_pwrsrc_switch_to_vaux(tp);
2958 else
2959 tg3_pwrsrc_die_with_vmain(tp);
2961 done:
2962 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2965 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2967 bool need_vaux = false;
2969 /* The GPIOs do something completely different on 57765. */
2970 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2973 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2974 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2975 tg3_asic_rev(tp) == ASIC_REV_5720) {
2976 tg3_frob_aux_power_5717(tp, include_wol ?
2977 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2981 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2982 struct net_device *dev_peer;
2984 dev_peer = pci_get_drvdata(tp->pdev_peer);
2986 /* remove_one() may have been run on the peer. */
2988 struct tg3 *tp_peer = netdev_priv(dev_peer);
2990 if (tg3_flag(tp_peer, INIT_COMPLETE))
2993 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2994 tg3_flag(tp_peer, ENABLE_ASF))
2995 need_vaux = true;
2999 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
3000 tg3_flag(tp, ENABLE_ASF))
3001 need_vaux = true;
3003 if (need_vaux)
3004 tg3_pwrsrc_switch_to_vaux(tp);
3005 else
3006 tg3_pwrsrc_die_with_vmain(tp);
3009 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3011 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3012 return true;
3013 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3014 if (speed != SPEED_10)
3015 return true;
3016 } else if (speed == SPEED_10)
3017 return true;
3019 return false;
3022 static bool tg3_phy_power_bug(struct tg3 *tp)
3024 switch (tg3_asic_rev(tp)) {
3029 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3038 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3047 static bool tg3_phy_led_bug(struct tg3 *tp)
3049 switch (tg3_asic_rev(tp)) {
3052 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3061 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3065 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3068 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3069 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3070 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3071 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3074 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3075 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3076 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3081 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3083 val = tr32(GRC_MISC_CFG);
3084 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3087 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3089 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3092 tg3_writephy(tp, MII_ADVERTISE, 0);
3093 tg3_writephy(tp, MII_BMCR,
3094 BMCR_ANENABLE | BMCR_ANRESTART);
3096 tg3_writephy(tp, MII_TG3_FET_TEST,
3097 phytest | MII_TG3_FET_SHADOW_EN);
3098 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3099 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3101 MII_TG3_FET_SHDW_AUXMODE4,
3104 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3107 } else if (do_low_power) {
3108 if (!tg3_phy_led_bug(tp))
3109 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3110 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3112 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3113 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3114 MII_TG3_AUXCTL_PCTL_VREG_11V;
3115 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3118 /* The PHY should not be powered down on some chips because
3119 * of bugs.
3120 */
3121 if (tg3_phy_power_bug(tp))
3124 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3125 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3126 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3127 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3128 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3129 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3132 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3135 /* tp->lock is held. */
3136 static int tg3_nvram_lock(struct tg3 *tp)
3138 if (tg3_flag(tp, NVRAM)) {
3141 if (tp->nvram_lock_cnt == 0) {
3142 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3143 for (i = 0; i < 8000; i++) {
3144 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3145 break;
3146 udelay(20);
3148 if (i == 8000) {
3149 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3150 return -ENODEV;
3153 tp->nvram_lock_cnt++;
3158 /* tp->lock is held. */
3159 static void tg3_nvram_unlock(struct tg3 *tp)
3161 if (tg3_flag(tp, NVRAM)) {
3162 if (tp->nvram_lock_cnt > 0)
3163 tp->nvram_lock_cnt--;
3164 if (tp->nvram_lock_cnt == 0)
3165 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3169 /* tp->lock is held. */
3170 static void tg3_enable_nvram_access(struct tg3 *tp)
3172 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3173 u32 nvaccess = tr32(NVRAM_ACCESS);
3175 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3179 /* tp->lock is held. */
3180 static void tg3_disable_nvram_access(struct tg3 *tp)
3182 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3183 u32 nvaccess = tr32(NVRAM_ACCESS);
3185 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3189 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3190 u32 offset, u32 *val)
3195 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3198 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3199 EEPROM_ADDR_DEVID_MASK |
3200 EEPROM_ADDR_START);
3201 tw32(GRC_EEPROM_ADDR,
3202 tmp |
3203 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3204 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3205 EEPROM_ADDR_ADDR_MASK) |
3206 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3208 for (i = 0; i < 1000; i++) {
3209 tmp = tr32(GRC_EEPROM_ADDR);
3211 if (tmp & EEPROM_ADDR_COMPLETE)
3212 break;
3213 msleep(1);
3215 if (!(tmp & EEPROM_ADDR_COMPLETE))
3216 return -EBUSY;
3218 tmp = tr32(GRC_EEPROM_DATA);
3220 /*
3221 * The data will always be opposite the native endian
3222 * format. Perform a blind byteswap to compensate.
3223 */
3224 *val = swab32(tmp);
3226 return 0;
3229 #define NVRAM_CMD_TIMEOUT 10000
3231 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3235 tw32(NVRAM_CMD, nvram_cmd);
3236 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3238 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3244 if (i == NVRAM_CMD_TIMEOUT)
3245 return -EBUSY;
3247 return 0;
3250 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3252 if (tg3_flag(tp, NVRAM) &&
3253 tg3_flag(tp, NVRAM_BUFFERED) &&
3254 tg3_flag(tp, FLASH) &&
3255 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3256 (tp->nvram_jedecnum == JEDEC_ATMEL))
3258 addr = ((addr / tp->nvram_pagesize) <<
3259 ATMEL_AT45DB0X1B_PAGE_POS) +
3260 (addr % tp->nvram_pagesize);
3265 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3267 if (tg3_flag(tp, NVRAM) &&
3268 tg3_flag(tp, NVRAM_BUFFERED) &&
3269 tg3_flag(tp, FLASH) &&
3270 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3271 (tp->nvram_jedecnum == JEDEC_ATMEL))
3273 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3274 tp->nvram_pagesize) +
3275 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
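/* Worked example, assuming the 264-byte page size of the Atmel
 * AT45DB0X1B parts this translation targets, with a page shift of
 * ATMEL_AT45DB0X1B_PAGE_POS == 9: byte offset 1000 is page 3
 * (1000 / 264) at offset 208 (1000 % 264), so the physical address is
 * (3 << 9) + 208 = 1744. tg3_nvram_logical_addr() below is the exact
 * inverse of this mapping.
 */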
3280 /* NOTE: Data read in from NVRAM is byteswapped according to
3281 * the byteswapping settings for all other register accesses.
3282 * tg3 devices are BE devices, so on a BE machine, the data
3283 * returned will be exactly as it is seen in NVRAM. On a LE
3284 * machine, the 32-bit value will be byteswapped.
3285 */
3286 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3290 if (!tg3_flag(tp, NVRAM))
3291 return tg3_nvram_read_using_eeprom(tp, offset, val);
3293 offset = tg3_nvram_phys_addr(tp, offset);
3295 if (offset > NVRAM_ADDR_MSK)
3296 return -EINVAL;
3298 ret = tg3_nvram_lock(tp);
3302 tg3_enable_nvram_access(tp);
3304 tw32(NVRAM_ADDR, offset);
3305 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3306 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3309 *val = tr32(NVRAM_RDDATA);
3311 tg3_disable_nvram_access(tp);
3313 tg3_nvram_unlock(tp);
3318 /* Ensures NVRAM data is in bytestream format. */
3319 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3322 int res = tg3_nvram_read(tp, offset, &v);
3323 if (!res)
3324 *val = cpu_to_be32(v);
3325 return res;
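/* Callers that treat NVRAM as a byte stream (checksums, strings) want
 * this variant: on big-endian machines cpu_to_be32() is a no-op, while
 * on little-endian machines it undoes the register-access swap
 * described in the NOTE above, so *val always reflects the on-flash
 * byte order.
 */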
3328 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3329 u32 offset, u32 len, u8 *buf)
3334 for (i = 0; i < len; i += 4) {
3340 memcpy(&data, buf + i, 4);
3342 /*
3343 * The SEEPROM interface expects the data to always be opposite
3344 * the native endian format. We accomplish this by reversing
3345 * all the operations that would have been performed on the
3346 * data from a call to tg3_nvram_read_be32().
3347 */
3348 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3350 val = tr32(GRC_EEPROM_ADDR);
3351 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3353 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3355 tw32(GRC_EEPROM_ADDR, val |
3356 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3357 (addr & EEPROM_ADDR_ADDR_MASK) |
3361 for (j = 0; j < 1000; j++) {
3362 val = tr32(GRC_EEPROM_ADDR);
3364 if (val & EEPROM_ADDR_COMPLETE)
3368 if (!(val & EEPROM_ADDR_COMPLETE)) {
3377 /* offset and length are dword aligned */
3378 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3382 u32 pagesize = tp->nvram_pagesize;
3383 u32 pagemask = pagesize - 1;
3387 tmp = kmalloc(pagesize, GFP_KERNEL);
3393 u32 phy_addr, page_off, size;
3395 phy_addr = offset & ~pagemask;
3397 for (j = 0; j < pagesize; j += 4) {
3398 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3399 (__be32 *) (tmp + j));
3406 page_off = offset & pagemask;
3413 memcpy(tmp + page_off, buf, size);
3415 offset = offset + (pagesize - page_off);
3417 tg3_enable_nvram_access(tp);
3419 /*
3420 * Before we can erase the flash page, we need
3421 * to issue a special "write enable" command.
3422 */
3423 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3425 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3428 /* Erase the target page */
3429 tw32(NVRAM_ADDR, phy_addr);
3431 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3432 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3434 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3437 /* Issue another write enable to start the write. */
3438 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3440 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3443 for (j = 0; j < pagesize; j += 4) {
3446 data = *((__be32 *) (tmp + j));
3448 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3450 tw32(NVRAM_ADDR, phy_addr + j);
3452 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3456 nvram_cmd |= NVRAM_CMD_FIRST;
3457 else if (j == (pagesize - 4))
3458 nvram_cmd |= NVRAM_CMD_LAST;
3460 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3468 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3469 tg3_nvram_exec_cmd(tp, nvram_cmd);
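/* The unbuffered path is thus read-modify-write at page granularity:
 * read back the whole page, splice in the new bytes, issue a write
 * enable plus page erase, re-enable writes, program the page a word at
 * a time with FIRST/LAST framing, and finish with WRDI to relock the
 * part.
 */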
3476 /* offset and length are dword aligned */
3477 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3482 for (i = 0; i < len; i += 4, offset += 4) {
3483 u32 page_off, phy_addr, nvram_cmd;
3486 memcpy(&data, buf + i, 4);
3487 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3489 page_off = offset % tp->nvram_pagesize;
3491 phy_addr = tg3_nvram_phys_addr(tp, offset);
3493 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3495 if (page_off == 0 || i == 0)
3496 nvram_cmd |= NVRAM_CMD_FIRST;
3497 if (page_off == (tp->nvram_pagesize - 4))
3498 nvram_cmd |= NVRAM_CMD_LAST;
3501 nvram_cmd |= NVRAM_CMD_LAST;
3503 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3504 !tg3_flag(tp, FLASH) ||
3505 !tg3_flag(tp, 57765_PLUS))
3506 tw32(NVRAM_ADDR, phy_addr);
3508 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3509 !tg3_flag(tp, 5755_PLUS) &&
3510 (tp->nvram_jedecnum == JEDEC_ST) &&
3511 (nvram_cmd & NVRAM_CMD_FIRST)) {
3514 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3515 ret = tg3_nvram_exec_cmd(tp, cmd);
3519 if (!tg3_flag(tp, FLASH)) {
3520 /* We always do complete word writes to eeprom. */
3521 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3524 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
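/* FIRST/LAST frame each flash page here: FIRST goes out on the first
 * word of a transfer or page, LAST on a page's final word or on the
 * very last word overall, and ST parts (JEDEC_ST) additionally get a
 * write-enable command ahead of each page program.
 */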
3531 /* offset and length are dword aligned */
3532 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3536 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3537 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3538 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3542 if (!tg3_flag(tp, NVRAM)) {
3543 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3547 ret = tg3_nvram_lock(tp);
3551 tg3_enable_nvram_access(tp);
3552 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3553 tw32(NVRAM_WRITE1, 0x406);
3555 grc_mode = tr32(GRC_MODE);
3556 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3558 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3559 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3562 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3566 grc_mode = tr32(GRC_MODE);
3567 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3569 tg3_disable_nvram_access(tp);
3570 tg3_nvram_unlock(tp);
3573 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3574 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3581 #define RX_CPU_SCRATCH_BASE 0x30000
3582 #define RX_CPU_SCRATCH_SIZE 0x04000
3583 #define TX_CPU_SCRATCH_BASE 0x34000
3584 #define TX_CPU_SCRATCH_SIZE 0x04000
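/* Each on-chip CPU gets a 16 kB (0x4000-byte) scratch window; the
 * firmware loader below zeroes it and stages the image there before
 * releasing the CPU.
 */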
3586 /* tp->lock is held. */
3587 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3590 const int iters = 10000;
3592 for (i = 0; i < iters; i++) {
3593 tw32(cpu_base + CPU_STATE, 0xffffffff);
3594 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3595 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3597 if (pci_channel_offline(tp->pdev))
3601 return (i == iters) ? -EBUSY : 0;
3604 /* tp->lock is held. */
3605 static int tg3_rxcpu_pause(struct tg3 *tp)
3607 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3609 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3610 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3616 /* tp->lock is held. */
3617 static int tg3_txcpu_pause(struct tg3 *tp)
3619 return tg3_pause_cpu(tp, TX_CPU_BASE);
3622 /* tp->lock is held. */
3623 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3625 tw32(cpu_base + CPU_STATE, 0xffffffff);
3626 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3629 /* tp->lock is held. */
3630 static void tg3_rxcpu_resume(struct tg3 *tp)
3632 tg3_resume_cpu(tp, RX_CPU_BASE);
3635 /* tp->lock is held. */
3636 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3640 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3642 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3643 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3645 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3648 if (cpu_base == RX_CPU_BASE) {
3649 rc = tg3_rxcpu_pause(tp);
3650 } else {
3651 /*
3652 * There is only an Rx CPU for the 5750 derivative in the
3653 * BCM4785.
3654 */
3655 if (tg3_flag(tp, IS_SSB_CORE))
3656 return 0;
3658 rc = tg3_txcpu_pause(tp);
3662 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3663 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3667 /* Clear firmware's nvram arbitration. */
3668 if (tg3_flag(tp, NVRAM))
3669 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3673 static int tg3_fw_data_len(struct tg3 *tp,
3674 const struct tg3_firmware_hdr *fw_hdr)
3678 /* Non fragmented firmware have one firmware header followed by a
3679 * contiguous chunk of data to be written. The length field in that
3680 * header is not the length of data to be written but the complete
3681 * length of the bss. The data length is determined based on
3682 * tp->fw->size minus headers.
3684 * Fragmented firmware have a main header followed by multiple
3685 * fragments. Each fragment is identical to non fragmented firmware
3686 * with a firmware header followed by a contiguous chunk of data. In
3687 * the main header, the length field is unused and set to 0xffffffff.
3688 * In each fragment header the length is the entire size of that
3689 * fragment i.e. fragment data + header length. Data length is
3690 * therefore length field in the header minus TG3_FW_HDR_LEN.
3691 */
3692 if (tp->fw_len == 0xffffffff)
3693 fw_len = be32_to_cpu(fw_hdr->len);
3695 fw_len = tp->fw->size;
3697 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
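/* Example: a fragment header reporting len == 4096 carries
 * (4096 - TG3_FW_HDR_LEN) / 4 data words; with the 12-byte header
 * (version, base address, length) that works out to 1021 words.
 */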
3700 /* tp->lock is held. */
3701 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3702 u32 cpu_scratch_base, int cpu_scratch_size,
3703 const struct tg3_firmware_hdr *fw_hdr)
3706 void (*write_op)(struct tg3 *, u32, u32);
3707 int total_len = tp->fw->size;
3709 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3711 "%s: Trying to load TX cpu firmware which is 5705\n",
3716 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3717 write_op = tg3_write_mem;
3719 write_op = tg3_write_indirect_reg32;
3721 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3722 /* It is possible that bootcode is still loading at this point.
3723 * Get the nvram lock first before halting the cpu.
3724 */
3725 int lock_err = tg3_nvram_lock(tp);
3726 err = tg3_halt_cpu(tp, cpu_base);
3728 tg3_nvram_unlock(tp);
3732 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3733 write_op(tp, cpu_scratch_base + i, 0);
3734 tw32(cpu_base + CPU_STATE, 0xffffffff);
3735 tw32(cpu_base + CPU_MODE,
3736 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3738 /* Subtract additional main header for fragmented firmware and
3739 * advance to the first fragment.
3740 */
3741 total_len -= TG3_FW_HDR_LEN;
3746 u32 *fw_data = (u32 *)(fw_hdr + 1);
3747 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3748 write_op(tp, cpu_scratch_base +
3749 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3751 be32_to_cpu(fw_data[i]));
3753 total_len -= be32_to_cpu(fw_hdr->len);
3755 /* Advance to next fragment */
3756 fw_hdr = (struct tg3_firmware_hdr *)
3757 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3758 } while (total_len > 0);
3766 /* tp->lock is held. */
3767 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3770 const int iters = 5;
3772 tw32(cpu_base + CPU_STATE, 0xffffffff);
3773 tw32_f(cpu_base + CPU_PC, pc);
3775 for (i = 0; i < iters; i++) {
3776 if (tr32(cpu_base + CPU_PC) == pc)
3778 tw32(cpu_base + CPU_STATE, 0xffffffff);
3779 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3780 tw32_f(cpu_base + CPU_PC, pc);
3784 return (i == iters) ? -EBUSY : 0;
3787 /* tp->lock is held. */
3788 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3790 const struct tg3_firmware_hdr *fw_hdr;
3793 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3795 /* Firmware blob starts with version numbers, followed by
3796 start address and length. We are setting complete length.
3797 length = end_address_of_bss - start_address_of_text.
3798 Remainder is the blob to be loaded contiguously
3799 from start address. */
3801 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3802 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3807 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3808 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3813 /* Now startup only the RX cpu. */
3814 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3815 be32_to_cpu(fw_hdr->base_addr));
3817 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3818 "should be %08x\n", __func__,
3819 tr32(RX_CPU_BASE + CPU_PC),
3820 be32_to_cpu(fw_hdr->base_addr));
3824 tg3_rxcpu_resume(tp);
3829 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3831 const int iters = 1000;
3835 /* Wait for boot code to complete initialization and enter service
3836 * loop. It is then safe to download service patches.
3837 */
3838 for (i = 0; i < iters; i++) {
3839 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3846 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3850 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3852 netdev_warn(tp->dev,
3853 "Other patches exist. Not downloading EEE patch\n");
3860 /* tp->lock is held. */
3861 static void tg3_load_57766_firmware(struct tg3 *tp)
3863 struct tg3_firmware_hdr *fw_hdr;
3865 if (!tg3_flag(tp, NO_NVRAM))
3868 if (tg3_validate_rxcpu_state(tp))
3874 /* This firmware blob has a different format than older firmware
3875 * releases as given below. The main difference is we have fragmented
3876 * data to be written to non-contiguous locations.
3878 * In the beginning we have a firmware header identical to other
3879 * firmware which consists of version, base addr and length. The length
3880 * here is unused and set to 0xffffffff.
3882 * This is followed by a series of firmware fragments which are
3883 * individually identical to previous firmware. i.e. they have the
3884 * firmware header and followed by data for that fragment. The version
3885 * field of the individual fragment header is unused.
3886 */
3888 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3889 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3892 if (tg3_rxcpu_pause(tp))
3895 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3896 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3898 tg3_rxcpu_resume(tp);
3901 /* tp->lock is held. */
3902 static int tg3_load_tso_firmware(struct tg3 *tp)
3904 const struct tg3_firmware_hdr *fw_hdr;
3905 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3908 if (!tg3_flag(tp, FW_TSO))
3911 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3913 /* Firmware blob starts with version numbers, followed by
3914 start address and length. We are setting complete length.
3915 length = end_address_of_bss - start_address_of_text.
3916 Remainder is the blob to be loaded contiguously
3917 from start address. */
3919 cpu_scratch_size = tp->fw_len;
3921 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3922 cpu_base = RX_CPU_BASE;
3923 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3925 cpu_base = TX_CPU_BASE;
3926 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3927 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3930 err = tg3_load_firmware_cpu(tp, cpu_base,
3931 cpu_scratch_base, cpu_scratch_size,
3936 /* Now startup the cpu. */
3937 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3938 be32_to_cpu(fw_hdr->base_addr));
3941 "%s fails to set CPU PC, is %08x should be %08x\n",
3942 __func__, tr32(cpu_base + CPU_PC),
3943 be32_to_cpu(fw_hdr->base_addr));
3947 tg3_resume_cpu(tp, cpu_base);
3951 /* tp->lock is held. */
3952 static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3954 u32 addr_high, addr_low;
3956 addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3957 addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3958 (mac_addr[4] << 8) | mac_addr[5]);
3961 tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3962 tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3965 tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3966 tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
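/* Example: for 00:10:18:aa:bb:cc the MAC block sees addr_high ==
 * 0x00000010 and addr_low == 0x18aabbcc, i.e. the two high-order
 * octets in the HIGH register and the remaining four in LOW.
 */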
3970 /* tp->lock is held. */
3971 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3976 for (i = 0; i < 4; i++) {
3977 if (i == 1 && skip_mac_1)
3979 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3982 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3983 tg3_asic_rev(tp) == ASIC_REV_5704) {
3984 for (i = 4; i < 16; i++)
3985 __tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3988 addr_high = (tp->dev->dev_addr[0] +
3989 tp->dev->dev_addr[1] +
3990 tp->dev->dev_addr[2] +
3991 tp->dev->dev_addr[3] +
3992 tp->dev->dev_addr[4] +
3993 tp->dev->dev_addr[5]) &
3994 TX_BACKOFF_SEED_MASK;
3995 tw32(MAC_TX_BACKOFF_SEED, addr_high);
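/* Seeding the TX backoff generator from a byte-sum of the station
 * address helps NICs on the same wire pick different backoff slots
 * after a collision.
 */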
3998 static void tg3_enable_register_access(struct tg3 *tp)
4000 /*
4001 * Make sure register accesses (indirect or otherwise) will function
4002 * correctly.
4003 */
4004 pci_write_config_dword(tp->pdev,
4005 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4008 static int tg3_power_up(struct tg3 *tp)
4012 tg3_enable_register_access(tp);
4014 err = pci_set_power_state(tp->pdev, PCI_D0);
4015 if (!err) {
4016 /* Switch out of Vaux if it is a NIC */
4017 tg3_pwrsrc_switch_to_vmain(tp);
4018 } else {
4019 netdev_err(tp->dev, "Transition to D0 failed\n");
4025 static int tg3_setup_phy(struct tg3 *, bool);
4027 static int tg3_power_down_prepare(struct tg3 *tp)
4030 bool device_should_wake, do_low_power;
4032 tg3_enable_register_access(tp);
4034 /* Restore the CLKREQ setting. */
4035 if (tg3_flag(tp, CLKREQ_BUG))
4036 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4037 PCI_EXP_LNKCTL_CLKREQ_EN);
4039 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4040 tw32(TG3PCI_MISC_HOST_CTRL,
4041 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4043 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4044 tg3_flag(tp, WOL_ENABLE);
4046 if (tg3_flag(tp, USE_PHYLIB)) {
4047 do_low_power = false;
4048 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4049 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4050 struct phy_device *phydev;
4051 u32 phyid, advertising;
4053 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4055 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4057 tp->link_config.speed = phydev->speed;
4058 tp->link_config.duplex = phydev->duplex;
4059 tp->link_config.autoneg = phydev->autoneg;
4060 tp->link_config.advertising = phydev->advertising;
4062 advertising = ADVERTISED_TP |
4064 ADVERTISED_Autoneg |
4065 ADVERTISED_10baseT_Half;
4067 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4068 if (tg3_flag(tp, WOL_SPEED_100MB))
4070 ADVERTISED_100baseT_Half |
4071 ADVERTISED_100baseT_Full |
4072 ADVERTISED_10baseT_Full;
4074 advertising |= ADVERTISED_10baseT_Full;
4077 phydev->advertising = advertising;
4079 phy_start_aneg(phydev);
4081 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4082 if (phyid != PHY_ID_BCMAC131) {
4083 phyid &= PHY_BCM_OUI_MASK;
4084 if (phyid == PHY_BCM_OUI_1 ||
4085 phyid == PHY_BCM_OUI_2 ||
4086 phyid == PHY_BCM_OUI_3)
4087 do_low_power = true;
4091 do_low_power = true;
4093 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4094 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4096 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4097 tg3_setup_phy(tp, false);
4100 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4103 val = tr32(GRC_VCPU_EXT_CTRL);
4104 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4105 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4109 for (i = 0; i < 200; i++) {
4110 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4111 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4116 if (tg3_flag(tp, WOL_CAP))
4117 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4118 WOL_DRV_STATE_SHUTDOWN |
4122 if (device_should_wake) {
4125 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4127 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4128 tg3_phy_auxctl_write(tp,
4129 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4130 MII_TG3_AUXCTL_PCTL_WOL_EN |
4131 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4132 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4136 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4137 mac_mode = MAC_MODE_PORT_MODE_GMII;
4138 else if (tp->phy_flags &
4139 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4140 if (tp->link_config.active_speed == SPEED_1000)
4141 mac_mode = MAC_MODE_PORT_MODE_GMII;
4143 mac_mode = MAC_MODE_PORT_MODE_MII;
4145 mac_mode = MAC_MODE_PORT_MODE_MII;
4147 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4148 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4149 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4150 SPEED_100 : SPEED_10;
4151 if (tg3_5700_link_polarity(tp, speed))
4152 mac_mode |= MAC_MODE_LINK_POLARITY;
4154 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4157 mac_mode = MAC_MODE_PORT_MODE_TBI;
4160 if (!tg3_flag(tp, 5750_PLUS))
4161 tw32(MAC_LED_CTRL, tp->led_ctrl);
4163 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4164 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4165 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4166 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4168 if (tg3_flag(tp, ENABLE_APE))
4169 mac_mode |= MAC_MODE_APE_TX_EN |
4170 MAC_MODE_APE_RX_EN |
4171 MAC_MODE_TDE_ENABLE;
4173 tw32_f(MAC_MODE, mac_mode);
4176 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4180 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4181 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4182 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4185 base_val = tp->pci_clock_ctrl;
4186 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4187 CLOCK_CTRL_TXCLK_DISABLE);
4189 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4190 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4191 } else if (tg3_flag(tp, 5780_CLASS) ||
4192 tg3_flag(tp, CPMU_PRESENT) ||
4193 tg3_asic_rev(tp) == ASIC_REV_5906) {
4195 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4196 u32 newbits1, newbits2;
4198 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4199 tg3_asic_rev(tp) == ASIC_REV_5701) {
4200 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4201 CLOCK_CTRL_TXCLK_DISABLE |
4203 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4204 } else if (tg3_flag(tp, 5705_PLUS)) {
4205 newbits1 = CLOCK_CTRL_625_CORE;
4206 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4208 newbits1 = CLOCK_CTRL_ALTCLK;
4209 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4212 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4215 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4218 if (!tg3_flag(tp, 5705_PLUS)) {
4221 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4222 tg3_asic_rev(tp) == ASIC_REV_5701) {
4223 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4224 CLOCK_CTRL_TXCLK_DISABLE |
4225 CLOCK_CTRL_44MHZ_CORE);
4227 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4230 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4231 tp->pci_clock_ctrl | newbits3, 40);
4235 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4236 tg3_power_down_phy(tp, do_low_power);
4238 tg3_frob_aux_power(tp, true);
4240 /* Workaround for unstable PLL clock */
4241 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4242 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4243 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4244 u32 val = tr32(0x7d00);
4246 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4248 if (!tg3_flag(tp, ENABLE_ASF)) {
4251 err = tg3_nvram_lock(tp);
4252 tg3_halt_cpu(tp, RX_CPU_BASE);
4254 tg3_nvram_unlock(tp);
4258 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4260 tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4265 static void tg3_power_down(struct tg3 *tp)
4267 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4268 pci_set_power_state(tp->pdev, PCI_D3hot);
4271 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4273 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4274 case MII_TG3_AUX_STAT_10HALF:
4276 *duplex = DUPLEX_HALF;
4279 case MII_TG3_AUX_STAT_10FULL:
4281 *duplex = DUPLEX_FULL;
4284 case MII_TG3_AUX_STAT_100HALF:
4286 *duplex = DUPLEX_HALF;
4289 case MII_TG3_AUX_STAT_100FULL:
4291 *duplex = DUPLEX_FULL;
4294 case MII_TG3_AUX_STAT_1000HALF:
4295 *speed = SPEED_1000;
4296 *duplex = DUPLEX_HALF;
4299 case MII_TG3_AUX_STAT_1000FULL:
4300 *speed = SPEED_1000;
4301 *duplex = DUPLEX_FULL;
4305 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4306 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4308 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4312 *speed = SPEED_UNKNOWN;
4313 *duplex = DUPLEX_UNKNOWN;
4318 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4323 new_adv = ADVERTISE_CSMA;
4324 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4325 new_adv |= mii_advertise_flowctrl(flowctrl);
4327 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4331 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4332 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4334 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4335 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4336 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4338 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4343 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4346 tw32(TG3_CPMU_EEE_MODE,
4347 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4349 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4354 /* Advertise 100-BaseTX EEE ability */
4355 if (advertise & ADVERTISED_100baseT_Full)
4356 val |= MDIO_AN_EEE_ADV_100TX;
4357 /* Advertise 1000-BaseT EEE ability */
4358 if (advertise & ADVERTISED_1000baseT_Full)
4359 val |= MDIO_AN_EEE_ADV_1000T;
4361 if (!tp->eee.eee_enabled) {
4363 tp->eee.advertised = 0;
4365 tp->eee.advertised = advertise &
4366 (ADVERTISED_100baseT_Full |
4367 ADVERTISED_1000baseT_Full);
4370 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4374 switch (tg3_asic_rev(tp)) {
4376 case ASIC_REV_57765:
4377 case ASIC_REV_57766:
4379 /* If we advertised any eee advertisements above... */
4381 val = MII_TG3_DSP_TAP26_ALNOKO |
4382 MII_TG3_DSP_TAP26_RMRXSTO |
4383 MII_TG3_DSP_TAP26_OPCSINPT;
4384 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4388 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4389 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4390 MII_TG3_DSP_CH34TP2_HIBW01);
4393 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4402 static void tg3_phy_copper_begin(struct tg3 *tp)
4404 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4405 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4408 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4409 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4410 adv = ADVERTISED_10baseT_Half |
4411 ADVERTISED_10baseT_Full;
4412 if (tg3_flag(tp, WOL_SPEED_100MB))
4413 adv |= ADVERTISED_100baseT_Half |
4414 ADVERTISED_100baseT_Full;
4415 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4416 if (!(tp->phy_flags &
4417 TG3_PHYFLG_DISABLE_1G_HD_ADV))
4418 adv |= ADVERTISED_1000baseT_Half;
4419 adv |= ADVERTISED_1000baseT_Full;
4422 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4424 adv = tp->link_config.advertising;
4425 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4426 adv &= ~(ADVERTISED_1000baseT_Half |
4427 ADVERTISED_1000baseT_Full);
4429 fc = tp->link_config.flowctrl;
4432 tg3_phy_autoneg_cfg(tp, adv, fc);
4434 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4435 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4436 /* Normally during power down we want to autonegotiate
4437 * the lowest possible speed for WOL. However, to avoid
4438 * link flap, we leave it untouched.
4439 */
4443 tg3_writephy(tp, MII_BMCR,
4444 BMCR_ANENABLE | BMCR_ANRESTART);
4447 u32 bmcr, orig_bmcr;
4449 tp->link_config.active_speed = tp->link_config.speed;
4450 tp->link_config.active_duplex = tp->link_config.duplex;
4452 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4453 /* With autoneg disabled, 5715 only links up when the
4454 * advertisement register has the configured speed
4455 * enabled.
4456 */
4457 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4461 switch (tp->link_config.speed) {
4467 bmcr |= BMCR_SPEED100;
4471 bmcr |= BMCR_SPEED1000;
4475 if (tp->link_config.duplex == DUPLEX_FULL)
4476 bmcr |= BMCR_FULLDPLX;
4478 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4479 (bmcr != orig_bmcr)) {
4480 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4481 for (i = 0; i < 1500; i++) {
4485 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4486 tg3_readphy(tp, MII_BMSR, &tmp))
4488 if (!(tmp & BMSR_LSTATUS)) {
4493 tg3_writephy(tp, MII_BMCR, bmcr);
4499 static int tg3_phy_pull_config(struct tg3 *tp)
4504 err = tg3_readphy(tp, MII_BMCR, &val);
4508 if (!(val & BMCR_ANENABLE)) {
4509 tp->link_config.autoneg = AUTONEG_DISABLE;
4510 tp->link_config.advertising = 0;
4511 tg3_flag_clear(tp, PAUSE_AUTONEG);
4515 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4517 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4520 tp->link_config.speed = SPEED_10;
4523 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4526 tp->link_config.speed = SPEED_100;
4528 case BMCR_SPEED1000:
4529 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4530 tp->link_config.speed = SPEED_1000;
4538 if (val & BMCR_FULLDPLX)
4539 tp->link_config.duplex = DUPLEX_FULL;
4541 tp->link_config.duplex = DUPLEX_HALF;
4543 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4549 tp->link_config.autoneg = AUTONEG_ENABLE;
4550 tp->link_config.advertising = ADVERTISED_Autoneg;
4551 tg3_flag_set(tp, PAUSE_AUTONEG);
4553 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4556 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4560 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4561 tp->link_config.advertising |= adv | ADVERTISED_TP;
4563 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4565 tp->link_config.advertising |= ADVERTISED_FIBRE;
4568 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4571 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4572 err = tg3_readphy(tp, MII_CTRL1000, &val);
4576 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4578 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4582 adv = tg3_decode_flowctrl_1000X(val);
4583 tp->link_config.flowctrl = adv;
4585 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4586 adv = mii_adv_to_ethtool_adv_x(val);
4589 tp->link_config.advertising |= adv;
4596 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4600 /* Turn off tap power management. */
4601 /* Set Extended packet length bit */
4602 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4604 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4605 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4606 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4607 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4608 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4615 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4617 struct ethtool_eee eee;
4619 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4622 tg3_eee_pull_config(tp, &eee);
4624 if (tp->eee.eee_enabled) {
4625 if (tp->eee.advertised != eee.advertised ||
4626 tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4627 tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4630 /* EEE is disabled but we're advertising */
4638 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4640 u32 advmsk, tgtadv, advertising;
4642 advertising = tp->link_config.advertising;
4643 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4645 advmsk = ADVERTISE_ALL;
4646 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4647 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4648 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4651 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4654 if ((*lcladv & advmsk) != tgtadv)
4657 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4660 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4662 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4666 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4667 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4668 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4669 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4670 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4672 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4675 if (tg3_ctrl != tgtadv)
4682 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4686 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4689 if (tg3_readphy(tp, MII_STAT1000, &val))
4692 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4695 if (tg3_readphy(tp, MII_LPA, rmtadv))
4698 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4699 tp->link_config.rmt_adv = lpeth;
4704 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4706 if (curr_link_up != tp->link_up) {
4708 netif_carrier_on(tp->dev);
4710 netif_carrier_off(tp->dev);
4711 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4712 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4715 tg3_link_report(tp);
4722 static void tg3_clear_mac_status(struct tg3 *tp)
4727 MAC_STATUS_SYNC_CHANGED |
4728 MAC_STATUS_CFG_CHANGED |
4729 MAC_STATUS_MI_COMPLETION |
4730 MAC_STATUS_LNKSTATE_CHANGED);
4734 static void tg3_setup_eee(struct tg3 *tp)
4738 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4739 TG3_CPMU_EEE_LNKIDL_UART_IDL;
4740 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4741 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4743 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4745 tw32_f(TG3_CPMU_EEE_CTRL,
4746 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4748 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4749 (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4750 TG3_CPMU_EEEMD_LPI_IN_RX |
4751 TG3_CPMU_EEEMD_EEE_ENABLE;
4753 if (tg3_asic_rev(tp) != ASIC_REV_5717)
4754 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4756 if (tg3_flag(tp, ENABLE_APE))
4757 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4759 tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4761 tw32_f(TG3_CPMU_EEE_DBTMR1,
4762 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4763 (tp->eee.tx_lpi_timer & 0xffff));
4765 tw32_f(TG3_CPMU_EEE_DBTMR2,
4766 TG3_CPMU_DBTMR2_APE_TX_2047US |
4767 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4770 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4772 bool current_link_up;
4774 u32 lcl_adv, rmt_adv;
4779 tg3_clear_mac_status(tp);
4781 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4783 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4787 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4789 /* Some third-party PHYs need to be reset on link going
4790 * down.
4791 */
4792 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4793 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4794 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4796 tg3_readphy(tp, MII_BMSR, &bmsr);
4797 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4798 !(bmsr & BMSR_LSTATUS))
4804 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4805 tg3_readphy(tp, MII_BMSR, &bmsr);
4806 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4807 !tg3_flag(tp, INIT_COMPLETE))
4810 if (!(bmsr & BMSR_LSTATUS)) {
4811 err = tg3_init_5401phy_dsp(tp);
4815 tg3_readphy(tp, MII_BMSR, &bmsr);
4816 for (i = 0; i < 1000; i++) {
4818 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4819 (bmsr & BMSR_LSTATUS)) {
4825 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4826 TG3_PHY_REV_BCM5401_B0 &&
4827 !(bmsr & BMSR_LSTATUS) &&
4828 tp->link_config.active_speed == SPEED_1000) {
4829 err = tg3_phy_reset(tp);
4831 err = tg3_init_5401phy_dsp(tp);
4836 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4837 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4838 /* 5701 {A0,B0} CRC bug workaround */
4839 tg3_writephy(tp, 0x15, 0x0a75);
4840 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4842 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4845 /* Clear pending interrupts... */
4846 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4847 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4849 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4850 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4851 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4852 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4854 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4855 tg3_asic_rev(tp) == ASIC_REV_5701) {
4856 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4857 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4858 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4860 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4863 current_link_up = false;
4864 current_speed = SPEED_UNKNOWN;
4865 current_duplex = DUPLEX_UNKNOWN;
4866 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4867 tp->link_config.rmt_adv = 0;
4869 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4870 err = tg3_phy_auxctl_read(tp,
4871 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4873 if (!err && !(val & (1 << 10))) {
4874 tg3_phy_auxctl_write(tp,
4875 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4882 for (i = 0; i < 100; i++) {
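/* MII_BMSR latches link-down events, so it is read twice here:
 * the first read clears any stale latched state and the second
 * reflects the current link status.
 */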
4883 tg3_readphy(tp, MII_BMSR, &bmsr);
4884 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4885 (bmsr & BMSR_LSTATUS))
4890 if (bmsr & BMSR_LSTATUS) {
4893 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4894 for (i = 0; i < 2000; i++) {
4896 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4901 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4906 for (i = 0; i < 200; i++) {
4907 tg3_readphy(tp, MII_BMCR, &bmcr);
4908 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4910 if (bmcr && bmcr != 0x7fff)
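/* A reading of 0 or 0x7fff presumably means the PHY has not yet
 * returned sane register contents, so keep polling.
 */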
4918 tp->link_config.active_speed = current_speed;
4919 tp->link_config.active_duplex = current_duplex;
4921 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4922 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4924 if ((bmcr & BMCR_ANENABLE) &&
4926 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4927 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4928 current_link_up = true;
4930 /* EEE settings changes take effect only after a phy
4931 * reset. If we have skipped a reset due to Link Flap
4932 * Avoidance being enabled, do it now.
4934 if (!eee_config_ok &&
4935 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4941 if (!(bmcr & BMCR_ANENABLE) &&
4942 tp->link_config.speed == current_speed &&
4943 tp->link_config.duplex == current_duplex) {
4944 current_link_up = true;
4948 if (current_link_up &&
4949 tp->link_config.active_duplex == DUPLEX_FULL) {
4952 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4953 reg = MII_TG3_FET_GEN_STAT;
4954 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4956 reg = MII_TG3_EXT_STAT;
4957 bit = MII_TG3_EXT_STAT_MDIX;
4960 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4961 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4963 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4968 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4969 tg3_phy_copper_begin(tp);
4971 if (tg3_flag(tp, ROBOSWITCH)) {
4972 current_link_up = true;
4973 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4974 current_speed = SPEED_1000;
4975 current_duplex = DUPLEX_FULL;
4976 tp->link_config.active_speed = current_speed;
4977 tp->link_config.active_duplex = current_duplex;
4980 tg3_readphy(tp, MII_BMSR, &bmsr);
4981 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4982 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4983 current_link_up = true;
4986 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4987 if (current_link_up) {
4988 if (tp->link_config.active_speed == SPEED_100 ||
4989 tp->link_config.active_speed == SPEED_10)
4990 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4992 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4993 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4994 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4996 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4998 /* In order for the 5750 core in BCM4785 chip to work properly
4999 * in RGMII mode, the LED Control Register must be set up.
5001 if (tg3_flag(tp, RGMII_MODE)) {
5002 u32 led_ctrl = tr32(MAC_LED_CTRL);
5003 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5005 if (tp->link_config.active_speed == SPEED_10)
5006 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5007 else if (tp->link_config.active_speed == SPEED_100)
5008 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5009 LED_CTRL_100MBPS_ON);
5010 else if (tp->link_config.active_speed == SPEED_1000)
5011 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5012 LED_CTRL_1000MBPS_ON);
5014 tw32(MAC_LED_CTRL, led_ctrl);
5018 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5019 if (tp->link_config.active_duplex == DUPLEX_HALF)
5020 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5022 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5023 if (current_link_up &&
5024 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5025 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5027 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5030 /* ??? Without this setting Netgear GA302T PHY does not
5031 * ??? send/receive packets...
5033 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5034 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5035 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5036 tw32_f(MAC_MI_MODE, tp->mi_mode);
5040 tw32_f(MAC_MODE, tp->mac_mode);
5043 tg3_phy_eee_adjust(tp, current_link_up);
5045 if (tg3_flag(tp, USE_LINKCHG_REG)) {
5046 /* Polled via timer. */
5047 tw32_f(MAC_EVENT, 0);
5049 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5053 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5055 tp->link_config.active_speed == SPEED_1000 &&
5056 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5059 (MAC_STATUS_SYNC_CHANGED |
5060 MAC_STATUS_CFG_CHANGED));
5063 NIC_SRAM_FIRMWARE_MBOX,
5064 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5067 /* Prevent send BD corruption. */
5068 if (tg3_flag(tp, CLKREQ_BUG)) {
5069 if (tp->link_config.active_speed == SPEED_100 ||
5070 tp->link_config.active_speed == SPEED_10)
5071 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5072 PCI_EXP_LNKCTL_CLKREQ_EN);
5074 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5075 PCI_EXP_LNKCTL_CLKREQ_EN);
5078 tg3_test_and_report_link_chg(tp, current_link_up);
5083 struct tg3_fiber_aneginfo {
5085 #define ANEG_STATE_UNKNOWN 0
5086 #define ANEG_STATE_AN_ENABLE 1
5087 #define ANEG_STATE_RESTART_INIT 2
5088 #define ANEG_STATE_RESTART 3
5089 #define ANEG_STATE_DISABLE_LINK_OK 4
5090 #define ANEG_STATE_ABILITY_DETECT_INIT 5
5091 #define ANEG_STATE_ABILITY_DETECT 6
5092 #define ANEG_STATE_ACK_DETECT_INIT 7
5093 #define ANEG_STATE_ACK_DETECT 8
5094 #define ANEG_STATE_COMPLETE_ACK_INIT 9
5095 #define ANEG_STATE_COMPLETE_ACK 10
5096 #define ANEG_STATE_IDLE_DETECT_INIT 11
5097 #define ANEG_STATE_IDLE_DETECT 12
5098 #define ANEG_STATE_LINK_OK 13
5099 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5100 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5103 #define MR_AN_ENABLE 0x00000001
5104 #define MR_RESTART_AN 0x00000002
5105 #define MR_AN_COMPLETE 0x00000004
5106 #define MR_PAGE_RX 0x00000008
5107 #define MR_NP_LOADED 0x00000010
5108 #define MR_TOGGLE_TX 0x00000020
5109 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5110 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5111 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5112 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5113 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5114 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5115 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5116 #define MR_TOGGLE_RX 0x00002000
5117 #define MR_NP_RX 0x00004000
5119 #define MR_LINK_OK 0x80000000
5121 unsigned long link_time, cur_time;
5123 u32 ability_match_cfg;
5124 int ability_match_count;
5126 char ability_match, idle_match, ack_match;
5128 u32 txconfig, rxconfig;
5129 #define ANEG_CFG_NP 0x00000080
5130 #define ANEG_CFG_ACK 0x00000040
5131 #define ANEG_CFG_RF2 0x00000020
5132 #define ANEG_CFG_RF1 0x00000010
5133 #define ANEG_CFG_PS2 0x00000001
5134 #define ANEG_CFG_PS1 0x00008000
5135 #define ANEG_CFG_HD 0x00004000
5136 #define ANEG_CFG_FD 0x00002000
5137 #define ANEG_CFG_INVAL 0x00001f06
5142 #define ANEG_TIMER_ENAB 2
5143 #define ANEG_FAILED -1
5145 #define ANEG_STATE_SETTLE_TIME 10000
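/* This is a software implementation of IEEE 802.3 Clause 37 style
 * auto-negotiation for 1000BASE-X fiber links: the states above
 * mirror the spec's ability-detect / ack-detect / complete-ack /
 * idle-detect arbitration progression.
 */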
5147 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5148 struct tg3_fiber_aneginfo *ap)
5151 unsigned long delta;
5155 if (ap->state == ANEG_STATE_UNKNOWN) {
5159 ap->ability_match_cfg = 0;
5160 ap->ability_match_count = 0;
5161 ap->ability_match = 0;
5167 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5168 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5170 if (rx_cfg_reg != ap->ability_match_cfg) {
5171 ap->ability_match_cfg = rx_cfg_reg;
5172 ap->ability_match = 0;
5173 ap->ability_match_count = 0;
5175 if (++ap->ability_match_count > 1) {
5176 ap->ability_match = 1;
5177 ap->ability_match_cfg = rx_cfg_reg;
5180 if (rx_cfg_reg & ANEG_CFG_ACK)
5188 ap->ability_match_cfg = 0;
5189 ap->ability_match_count = 0;
5190 ap->ability_match = 0;
5196 ap->rxconfig = rx_cfg_reg;
5199 switch (ap->state) {
5200 case ANEG_STATE_UNKNOWN:
5201 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5202 ap->state = ANEG_STATE_AN_ENABLE;
5205 case ANEG_STATE_AN_ENABLE:
5206 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5207 if (ap->flags & MR_AN_ENABLE) {
5210 ap->ability_match_cfg = 0;
5211 ap->ability_match_count = 0;
5212 ap->ability_match = 0;
5216 ap->state = ANEG_STATE_RESTART_INIT;
5218 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5222 case ANEG_STATE_RESTART_INIT:
5223 ap->link_time = ap->cur_time;
5224 ap->flags &= ~(MR_NP_LOADED);
5226 tw32(MAC_TX_AUTO_NEG, 0);
5227 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5228 tw32_f(MAC_MODE, tp->mac_mode);
5231 ret = ANEG_TIMER_ENAB;
5232 ap->state = ANEG_STATE_RESTART;
5235 case ANEG_STATE_RESTART:
5236 delta = ap->cur_time - ap->link_time;
5237 if (delta > ANEG_STATE_SETTLE_TIME)
5238 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5240 ret = ANEG_TIMER_ENAB;
5243 case ANEG_STATE_DISABLE_LINK_OK:
5247 case ANEG_STATE_ABILITY_DETECT_INIT:
5248 ap->flags &= ~(MR_TOGGLE_TX);
5249 ap->txconfig = ANEG_CFG_FD;
5250 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5251 if (flowctrl & ADVERTISE_1000XPAUSE)
5252 ap->txconfig |= ANEG_CFG_PS1;
5253 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5254 ap->txconfig |= ANEG_CFG_PS2;
5255 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5256 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5257 tw32_f(MAC_MODE, tp->mac_mode);
5260 ap->state = ANEG_STATE_ABILITY_DETECT;
5263 case ANEG_STATE_ABILITY_DETECT:
5264 if (ap->ability_match != 0 && ap->rxconfig != 0)
5265 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5268 case ANEG_STATE_ACK_DETECT_INIT:
5269 ap->txconfig |= ANEG_CFG_ACK;
5270 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5271 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5272 tw32_f(MAC_MODE, tp->mac_mode);
5275 ap->state = ANEG_STATE_ACK_DETECT;
5278 case ANEG_STATE_ACK_DETECT:
5279 if (ap->ack_match != 0) {
5280 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5281 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5282 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5284 ap->state = ANEG_STATE_AN_ENABLE;
5286 } else if (ap->ability_match != 0 &&
5287 ap->rxconfig == 0) {
5288 ap->state = ANEG_STATE_AN_ENABLE;
5292 case ANEG_STATE_COMPLETE_ACK_INIT:
5293 if (ap->rxconfig & ANEG_CFG_INVAL) {
5297 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5298 MR_LP_ADV_HALF_DUPLEX |
5299 MR_LP_ADV_SYM_PAUSE |
5300 MR_LP_ADV_ASYM_PAUSE |
5301 MR_LP_ADV_REMOTE_FAULT1 |
5302 MR_LP_ADV_REMOTE_FAULT2 |
5303 MR_LP_ADV_NEXT_PAGE |
5306 if (ap->rxconfig & ANEG_CFG_FD)
5307 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5308 if (ap->rxconfig & ANEG_CFG_HD)
5309 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5310 if (ap->rxconfig & ANEG_CFG_PS1)
5311 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5312 if (ap->rxconfig & ANEG_CFG_PS2)
5313 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5314 if (ap->rxconfig & ANEG_CFG_RF1)
5315 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5316 if (ap->rxconfig & ANEG_CFG_RF2)
5317 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5318 if (ap->rxconfig & ANEG_CFG_NP)
5319 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5321 ap->link_time = ap->cur_time;
5323 ap->flags ^= (MR_TOGGLE_TX);
5324 if (ap->rxconfig & 0x0008)
5325 ap->flags |= MR_TOGGLE_RX;
5326 if (ap->rxconfig & ANEG_CFG_NP)
5327 ap->flags |= MR_NP_RX;
5328 ap->flags |= MR_PAGE_RX;
5330 ap->state = ANEG_STATE_COMPLETE_ACK;
5331 ret = ANEG_TIMER_ENAB;
5334 case ANEG_STATE_COMPLETE_ACK:
5335 if (ap->ability_match != 0 &&
5336 ap->rxconfig == 0) {
5337 ap->state = ANEG_STATE_AN_ENABLE;
5340 delta = ap->cur_time - ap->link_time;
5341 if (delta > ANEG_STATE_SETTLE_TIME) {
5342 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5343 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5345 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5346 !(ap->flags & MR_NP_RX)) {
5347 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5355 case ANEG_STATE_IDLE_DETECT_INIT:
5356 ap->link_time = ap->cur_time;
5357 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5358 tw32_f(MAC_MODE, tp->mac_mode);
5361 ap->state = ANEG_STATE_IDLE_DETECT;
5362 ret = ANEG_TIMER_ENAB;
5365 case ANEG_STATE_IDLE_DETECT:
5366 if (ap->ability_match != 0 &&
5367 ap->rxconfig == 0) {
5368 ap->state = ANEG_STATE_AN_ENABLE;
5371 delta = ap->cur_time - ap->link_time;
5372 if (delta > ANEG_STATE_SETTLE_TIME) {
5373 /* XXX another gem from the Broadcom driver :( */
5374 ap->state = ANEG_STATE_LINK_OK;
5378 case ANEG_STATE_LINK_OK:
5379 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5383 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5384 /* ??? unimplemented */
5387 case ANEG_STATE_NEXT_PAGE_WAIT:
5388 /* ??? unimplemented */
5399 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5402 struct tg3_fiber_aneginfo aninfo;
5403 int status = ANEG_FAILED;
5407 tw32_f(MAC_TX_AUTO_NEG, 0);
5409 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5410 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5413 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5416 memset(&aninfo, 0, sizeof(aninfo));
5417 aninfo.flags |= MR_AN_ENABLE;
5418 aninfo.state = ANEG_STATE_UNKNOWN;
5419 aninfo.cur_time = 0;
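/* The loop below budgets the software state machine roughly
 * 195 ms, assuming the elided loop body delays about 1 usec
 * per tick.
 */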
5421 while (++tick < 195000) {
5422 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5423 if (status == ANEG_DONE || status == ANEG_FAILED)
5429 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5430 tw32_f(MAC_MODE, tp->mac_mode);
5433 *txflags = aninfo.txconfig;
5434 *rxflags = aninfo.flags;
5436 if (status == ANEG_DONE &&
5437 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5438 MR_LP_ADV_FULL_DUPLEX)))
5444 static void tg3_init_bcm8002(struct tg3 *tp)
5446 u32 mac_status = tr32(MAC_STATUS);
5449 /* Reset when initializing for the first time or when we have a link. */
5450 if (tg3_flag(tp, INIT_COMPLETE) &&
5451 !(mac_status & MAC_STATUS_PCS_SYNCED))
5454 /* Set PLL lock range. */
5455 tg3_writephy(tp, 0x16, 0x8007);
5458 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5460 /* Wait for reset to complete. */
5461 /* XXX schedule_timeout() ... */
5462 for (i = 0; i < 500; i++)
5465 /* Config mode; select PMA/Ch 1 regs. */
5466 tg3_writephy(tp, 0x10, 0x8411);
5468 /* Enable auto-lock and comdet, select txclk for tx. */
5469 tg3_writephy(tp, 0x11, 0x0a10);
5471 tg3_writephy(tp, 0x18, 0x00a0);
5472 tg3_writephy(tp, 0x16, 0x41ff);
5474 /* Assert and deassert POR. */
5475 tg3_writephy(tp, 0x13, 0x0400);
5477 tg3_writephy(tp, 0x13, 0x0000);
5479 tg3_writephy(tp, 0x11, 0x0a50);
5481 tg3_writephy(tp, 0x11, 0x0a10);
5483 /* Wait for signal to stabilize */
5484 /* XXX schedule_timeout() ... */
5485 for (i = 0; i < 15000; i++)
5488 /* Deselect the channel register so we can read the PHYID later. */
5491 tg3_writephy(tp, 0x10, 0x8011);
5494 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5497 bool current_link_up;
5498 u32 sg_dig_ctrl, sg_dig_status;
5499 u32 serdes_cfg, expected_sg_dig_ctrl;
5500 int workaround, port_a;
5503 expected_sg_dig_ctrl = 0;
5506 current_link_up = false;
5508 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5509 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5511 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5514 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5515 /* preserve bits 20-23 for voltage regulator */
5516 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5519 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5521 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5522 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5524 u32 val = serdes_cfg;
5530 tw32_f(MAC_SERDES_CFG, val);
5533 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5535 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5536 tg3_setup_flow_control(tp, 0, 0);
5537 current_link_up = true;
5542 /* Want auto-negotiation. */
5543 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5545 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5546 if (flowctrl & ADVERTISE_1000XPAUSE)
5547 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5548 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5549 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5551 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5552 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5553 tp->serdes_counter &&
5554 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5555 MAC_STATUS_RCVD_CFG)) ==
5556 MAC_STATUS_PCS_SYNCED)) {
5557 tp->serdes_counter--;
5558 current_link_up = true;
5563 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5564 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5566 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5568 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5569 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5570 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5571 MAC_STATUS_SIGNAL_DET)) {
5572 sg_dig_status = tr32(SG_DIG_STATUS);
5573 mac_status = tr32(MAC_STATUS);
5575 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5576 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5577 u32 local_adv = 0, remote_adv = 0;
5579 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5580 local_adv |= ADVERTISE_1000XPAUSE;
5581 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5582 local_adv |= ADVERTISE_1000XPSE_ASYM;
5584 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5585 remote_adv |= LPA_1000XPAUSE;
5586 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5587 remote_adv |= LPA_1000XPAUSE_ASYM;
5589 tp->link_config.rmt_adv =
5590 mii_adv_to_ethtool_adv_x(remote_adv);
5592 tg3_setup_flow_control(tp, local_adv, remote_adv);
5593 current_link_up = true;
5594 tp->serdes_counter = 0;
5595 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5596 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5597 if (tp->serdes_counter)
5598 tp->serdes_counter--;
5601 u32 val = serdes_cfg;
5608 tw32_f(MAC_SERDES_CFG, val);
5611 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5614 /* Link parallel detection - link is up */
5615 /* only if we have PCS_SYNC and not */
5616 /* receiving config code words */
5617 mac_status = tr32(MAC_STATUS);
5618 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5619 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5620 tg3_setup_flow_control(tp, 0, 0);
5621 current_link_up = true;
5623 TG3_PHYFLG_PARALLEL_DETECT;
5624 tp->serdes_counter =
5625 SERDES_PARALLEL_DET_TIMEOUT;
5627 goto restart_autoneg;
5631 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5632 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5636 return current_link_up;
5639 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5641 bool current_link_up = false;
5643 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5646 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5647 u32 txflags, rxflags;
5650 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5651 u32 local_adv = 0, remote_adv = 0;
5653 if (txflags & ANEG_CFG_PS1)
5654 local_adv |= ADVERTISE_1000XPAUSE;
5655 if (txflags & ANEG_CFG_PS2)
5656 local_adv |= ADVERTISE_1000XPSE_ASYM;
5658 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5659 remote_adv |= LPA_1000XPAUSE;
5660 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5661 remote_adv |= LPA_1000XPAUSE_ASYM;
5663 tp->link_config.rmt_adv =
5664 mii_adv_to_ethtool_adv_x(remote_adv);
5666 tg3_setup_flow_control(tp, local_adv, remote_adv);
5668 current_link_up = true;
5670 for (i = 0; i < 30; i++) {
5673 (MAC_STATUS_SYNC_CHANGED |
5674 MAC_STATUS_CFG_CHANGED));
5676 if ((tr32(MAC_STATUS) &
5677 (MAC_STATUS_SYNC_CHANGED |
5678 MAC_STATUS_CFG_CHANGED)) == 0)
5682 mac_status = tr32(MAC_STATUS);
5683 if (!current_link_up &&
5684 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5685 !(mac_status & MAC_STATUS_RCVD_CFG))
5686 current_link_up = true;
5688 tg3_setup_flow_control(tp, 0, 0);
5690 /* Forcing 1000FD link up. */
5691 current_link_up = true;
5693 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5696 tw32_f(MAC_MODE, tp->mac_mode);
5701 return current_link_up;
5704 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5707 u16 orig_active_speed;
5708 u8 orig_active_duplex;
5710 bool current_link_up;
5713 orig_pause_cfg = tp->link_config.active_flowctrl;
5714 orig_active_speed = tp->link_config.active_speed;
5715 orig_active_duplex = tp->link_config.active_duplex;
5717 if (!tg3_flag(tp, HW_AUTONEG) &&
5719 tg3_flag(tp, INIT_COMPLETE)) {
5720 mac_status = tr32(MAC_STATUS);
5721 mac_status &= (MAC_STATUS_PCS_SYNCED |
5722 MAC_STATUS_SIGNAL_DET |
5723 MAC_STATUS_CFG_CHANGED |
5724 MAC_STATUS_RCVD_CFG);
5725 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5726 MAC_STATUS_SIGNAL_DET)) {
5727 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5728 MAC_STATUS_CFG_CHANGED));
5733 tw32_f(MAC_TX_AUTO_NEG, 0);
5735 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5736 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5737 tw32_f(MAC_MODE, tp->mac_mode);
5740 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5741 tg3_init_bcm8002(tp);
5743 /* Enable link change event even when serdes polling. */
5744 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5747 current_link_up = false;
5748 tp->link_config.rmt_adv = 0;
5749 mac_status = tr32(MAC_STATUS);
5751 if (tg3_flag(tp, HW_AUTONEG))
5752 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5754 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5756 tp->napi[0].hw_status->status =
5757 (SD_STATUS_UPDATED |
5758 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5760 for (i = 0; i < 100; i++) {
5761 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5762 MAC_STATUS_CFG_CHANGED));
5764 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5765 MAC_STATUS_CFG_CHANGED |
5766 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5770 mac_status = tr32(MAC_STATUS);
5771 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5772 current_link_up = false;
5773 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5774 tp->serdes_counter == 0) {
5775 tw32_f(MAC_MODE, (tp->mac_mode |
5776 MAC_MODE_SEND_CONFIGS));
5778 tw32_f(MAC_MODE, tp->mac_mode);
5782 if (current_link_up) {
5783 tp->link_config.active_speed = SPEED_1000;
5784 tp->link_config.active_duplex = DUPLEX_FULL;
5785 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5786 LED_CTRL_LNKLED_OVERRIDE |
5787 LED_CTRL_1000MBPS_ON));
5789 tp->link_config.active_speed = SPEED_UNKNOWN;
5790 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5791 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5792 LED_CTRL_LNKLED_OVERRIDE |
5793 LED_CTRL_TRAFFIC_OVERRIDE));
5796 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5797 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5798 if (orig_pause_cfg != now_pause_cfg ||
5799 orig_active_speed != tp->link_config.active_speed ||
5800 orig_active_duplex != tp->link_config.active_duplex)
5801 tg3_link_report(tp);
5807 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5811 u16 current_speed = SPEED_UNKNOWN;
5812 u8 current_duplex = DUPLEX_UNKNOWN;
5813 bool current_link_up = false;
5814 u32 local_adv, remote_adv, sgsr;
5816 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5817 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5818 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5819 (sgsr & SERDES_TG3_SGMII_MODE)) {
5824 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5826 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5827 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5829 current_link_up = true;
5830 if (sgsr & SERDES_TG3_SPEED_1000) {
5831 current_speed = SPEED_1000;
5832 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5833 } else if (sgsr & SERDES_TG3_SPEED_100) {
5834 current_speed = SPEED_100;
5835 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837 current_speed = SPEED_10;
5838 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5841 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5842 current_duplex = DUPLEX_FULL;
5844 current_duplex = DUPLEX_HALF;
5847 tw32_f(MAC_MODE, tp->mac_mode);
5850 tg3_clear_mac_status(tp);
5852 goto fiber_setup_done;
5855 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5856 tw32_f(MAC_MODE, tp->mac_mode);
5859 tg3_clear_mac_status(tp);
5864 tp->link_config.rmt_adv = 0;
5866 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5867 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5868 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5869 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5870 bmsr |= BMSR_LSTATUS;
5872 bmsr &= ~BMSR_LSTATUS;
5875 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5877 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5878 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5879 /* do nothing, just check for link up at the end */
5880 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5883 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5884 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5885 ADVERTISE_1000XPAUSE |
5886 ADVERTISE_1000XPSE_ASYM |
5889 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5890 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5892 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5893 tg3_writephy(tp, MII_ADVERTISE, newadv);
5894 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5895 tg3_writephy(tp, MII_BMCR, bmcr);
5897 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5898 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5899 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5906 bmcr &= ~BMCR_SPEED1000;
5907 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5909 if (tp->link_config.duplex == DUPLEX_FULL)
5910 new_bmcr |= BMCR_FULLDPLX;
5912 if (new_bmcr != bmcr) {
5913 /* BMCR_SPEED1000 is a reserved bit that needs
5914 * to be set on write.
5916 new_bmcr |= BMCR_SPEED1000;
5918 /* Force a linkdown */
5922 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5923 adv &= ~(ADVERTISE_1000XFULL |
5924 ADVERTISE_1000XHALF |
5926 tg3_writephy(tp, MII_ADVERTISE, adv);
5927 tg3_writephy(tp, MII_BMCR, bmcr |
5931 tg3_carrier_off(tp);
5933 tg3_writephy(tp, MII_BMCR, new_bmcr);
5935 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5936 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5937 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5938 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5939 bmsr |= BMSR_LSTATUS;
5941 bmsr &= ~BMSR_LSTATUS;
5943 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5947 if (bmsr & BMSR_LSTATUS) {
5948 current_speed = SPEED_1000;
5949 current_link_up = true;
5950 if (bmcr & BMCR_FULLDPLX)
5951 current_duplex = DUPLEX_FULL;
5953 current_duplex = DUPLEX_HALF;
5958 if (bmcr & BMCR_ANENABLE) {
5961 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5962 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5963 common = local_adv & remote_adv;
5964 if (common & (ADVERTISE_1000XHALF |
5965 ADVERTISE_1000XFULL)) {
5966 if (common & ADVERTISE_1000XFULL)
5967 current_duplex = DUPLEX_FULL;
5969 current_duplex = DUPLEX_HALF;
5971 tp->link_config.rmt_adv =
5972 mii_adv_to_ethtool_adv_x(remote_adv);
5973 } else if (!tg3_flag(tp, 5780_CLASS)) {
5974 /* Link is up via parallel detect */
5976 current_link_up = false;
5982 if (current_link_up && current_duplex == DUPLEX_FULL)
5983 tg3_setup_flow_control(tp, local_adv, remote_adv);
5985 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5986 if (tp->link_config.active_duplex == DUPLEX_HALF)
5987 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5989 tw32_f(MAC_MODE, tp->mac_mode);
5992 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5994 tp->link_config.active_speed = current_speed;
5995 tp->link_config.active_duplex = current_duplex;
5997 tg3_test_and_report_link_chg(tp, current_link_up);
6001 static void tg3_serdes_parallel_detect(struct tg3 *tp)
6003 if (tp->serdes_counter) {
6004 /* Give autoneg time to complete. */
6005 tp->serdes_counter--;
6010 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6013 tg3_readphy(tp, MII_BMCR, &bmcr);
6014 if (bmcr & BMCR_ANENABLE) {
6017 /* Select shadow register 0x1f */
6018 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6019 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6021 /* Select expansion interrupt status register */
6022 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6023 MII_TG3_DSP_EXP1_INT_STAT);
6024 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6025 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6027 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6028 /* We have signal detect and not receiving
6029 * config code words, link is up by parallel detection. */
6033 bmcr &= ~BMCR_ANENABLE;
6034 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6035 tg3_writephy(tp, MII_BMCR, bmcr);
6036 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6039 } else if (tp->link_up &&
6040 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6041 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6044 /* Select expansion interrupt status register */
6045 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6046 MII_TG3_DSP_EXP1_INT_STAT);
6047 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6051 /* Config code words received, turn on autoneg. */
6052 tg3_readphy(tp, MII_BMCR, &bmcr);
6053 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6055 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6061 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6066 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6067 err = tg3_setup_fiber_phy(tp, force_reset);
6068 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6069 err = tg3_setup_fiber_mii_phy(tp, force_reset);
6071 err = tg3_setup_copper_phy(tp, force_reset);
6073 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6076 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6077 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6079 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6084 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6085 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6086 tw32(GRC_MISC_CFG, val);
6089 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6090 (6 << TX_LENGTHS_IPG_SHIFT);
6091 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6092 tg3_asic_rev(tp) == ASIC_REV_5762)
6093 val |= tr32(MAC_TX_LENGTHS) &
6094 (TX_LENGTHS_JMB_FRM_LEN_MSK |
6095 TX_LENGTHS_CNT_DWN_VAL_MSK);
6097 if (tp->link_config.active_speed == SPEED_1000 &&
6098 tp->link_config.active_duplex == DUPLEX_HALF)
6099 tw32(MAC_TX_LENGTHS, val |
6100 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6102 tw32(MAC_TX_LENGTHS, val |
6103 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
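/* The larger slot time above is presumably to cover the 512-byte
 * extended slot that 802.3z carrier extension requires at
 * 1000 Mb/s half duplex.
 */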
6105 if (!tg3_flag(tp, 5705_PLUS)) {
6107 tw32(HOSTCC_STAT_COAL_TICKS,
6108 tp->coal.stats_block_coalesce_usecs);
6110 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6114 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6115 val = tr32(PCIE_PWR_MGMT_THRESH);
6117 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6120 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6121 tw32(PCIE_PWR_MGMT_THRESH, val);
6127 /* tp->lock must be held */
6128 static u64 tg3_refclk_read(struct tg3 *tp)
6130 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6131 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6134 /* tp->lock must be held */
6135 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6137 u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6139 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6140 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6141 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6142 tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
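/* The 64-bit reference clock cannot be loaded atomically, so it
 * is stopped, both 32-bit halves are written, and then it is
 * resumed.
 */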
6145 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6146 static inline void tg3_full_unlock(struct tg3 *tp);
6147 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6149 struct tg3 *tp = netdev_priv(dev);
6151 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6152 SOF_TIMESTAMPING_RX_SOFTWARE |
6153 SOF_TIMESTAMPING_SOFTWARE;
6155 if (tg3_flag(tp, PTP_CAPABLE)) {
6156 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6157 SOF_TIMESTAMPING_RX_HARDWARE |
6158 SOF_TIMESTAMPING_RAW_HARDWARE;
6162 info->phc_index = ptp_clock_index(tp->ptp_clock);
6164 info->phc_index = -1;
6166 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6168 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6169 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6170 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6171 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6175 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6177 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6178 bool neg_adj = false;
6186 /* Frequency adjustment is performed using hardware with a 24 bit
6187 * accumulator and a programmable correction value. On each clk, the
6188 * correction value gets added to the accumulator and when it
6189 * overflows, the time counter is incremented/decremented.
6191 * So conversion from ppb to correction value is
6192 * ppb * (1 << 24) / 1000000000
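 * For example, ppb = 1000 gives a correction value of
 * 1000 * (1 << 24) / 1000000000 = 16 (16.78 truncated).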
6194 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6195 TG3_EAV_REF_CLK_CORRECT_MASK;
6197 tg3_full_lock(tp, 0);
6200 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6201 TG3_EAV_REF_CLK_CORRECT_EN |
6202 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6204 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6206 tg3_full_unlock(tp);
6211 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6213 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6215 tg3_full_lock(tp, 0);
6216 tp->ptp_adjust += delta;
6217 tg3_full_unlock(tp);
6222 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6226 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6228 tg3_full_lock(tp, 0);
6229 ns = tg3_refclk_read(tp);
6230 ns += tp->ptp_adjust;
6231 tg3_full_unlock(tp);
6233 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6234 ts->tv_nsec = remainder;
6239 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6240 const struct timespec *ts)
6243 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6245 ns = timespec_to_ns(ts);
6247 tg3_full_lock(tp, 0);
6248 tg3_refclk_write(tp, ns);
6250 tg3_full_unlock(tp);
6255 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6256 struct ptp_clock_request *rq, int on)
6258 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6263 case PTP_CLK_REQ_PEROUT:
6264 if (rq->perout.index != 0)
6267 tg3_full_lock(tp, 0);
6268 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6269 clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6274 nsec = rq->perout.start.sec * 1000000000ULL +
6275 rq->perout.start.nsec;
6277 if (rq->perout.period.sec || rq->perout.period.nsec) {
6278 netdev_warn(tp->dev,
6279 "Device supports only a one-shot timesync output, period must be 0\n");
6284 if (nsec & (1ULL << 63)) {
6285 netdev_warn(tp->dev,
6286 "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6291 tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6292 tw32(TG3_EAV_WATCHDOG0_MSB,
6293 TG3_EAV_WATCHDOG0_EN |
6294 ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6296 tw32(TG3_EAV_REF_CLCK_CTL,
6297 clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6299 tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6300 tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6304 tg3_full_unlock(tp);
6314 static const struct ptp_clock_info tg3_ptp_caps = {
6315 .owner = THIS_MODULE,
6316 .name = "tg3 clock",
6317 .max_adj = 250000000,
6322 .adjfreq = tg3_ptp_adjfreq,
6323 .adjtime = tg3_ptp_adjtime,
6324 .gettime = tg3_ptp_gettime,
6325 .settime = tg3_ptp_settime,
6326 .enable = tg3_ptp_enable,
6329 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6330 struct skb_shared_hwtstamps *timestamp)
6332 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6333 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6337 /* tp->lock must be held */
6338 static void tg3_ptp_init(struct tg3 *tp)
6340 if (!tg3_flag(tp, PTP_CAPABLE))
6343 /* Initialize the hardware clock to the system time. */
6344 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6346 tp->ptp_info = tg3_ptp_caps;
6349 /* tp->lock must be held */
6350 static void tg3_ptp_resume(struct tg3 *tp)
6352 if (!tg3_flag(tp, PTP_CAPABLE))
6355 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6359 static void tg3_ptp_fini(struct tg3 *tp)
6361 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6364 ptp_clock_unregister(tp->ptp_clock);
6365 tp->ptp_clock = NULL;
6369 static inline int tg3_irq_sync(struct tg3 *tp)
6371 return tp->irq_sync;
6374 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6378 dst = (u32 *)((u8 *)dst + off);
6379 for (i = 0; i < len; i += sizeof(u32))
6380 *dst++ = tr32(off + i);
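/* Note: dst is advanced by the register offset first, so each
 * register block lands at its matching offset in the dump buffer
 * and callers can pass the same base pointer for every block.
 */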
6383 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6385 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6386 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6387 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6388 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6389 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6390 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6391 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6392 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6393 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6394 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6395 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6396 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6397 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6398 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6399 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6400 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6401 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6402 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6403 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6405 if (tg3_flag(tp, SUPPORT_MSIX))
6406 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6408 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6409 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6410 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6411 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6412 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6413 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6414 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6415 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6417 if (!tg3_flag(tp, 5705_PLUS)) {
6418 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6419 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6420 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6423 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6424 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6425 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6426 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6427 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6429 if (tg3_flag(tp, NVRAM))
6430 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6433 static void tg3_dump_state(struct tg3 *tp)
6438 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6442 if (tg3_flag(tp, PCI_EXPRESS)) {
6443 /* Read up to but not including private PCI registers */
6444 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6445 regs[i / sizeof(u32)] = tr32(i);
6447 tg3_dump_legacy_regs(tp, regs);
6449 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6450 if (!regs[i + 0] && !regs[i + 1] &&
6451 !regs[i + 2] && !regs[i + 3])
6454 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6456 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6461 for (i = 0; i < tp->irq_cnt; i++) {
6462 struct tg3_napi *tnapi = &tp->napi[i];
6464 /* SW status block */
6466 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6468 tnapi->hw_status->status,
6469 tnapi->hw_status->status_tag,
6470 tnapi->hw_status->rx_jumbo_consumer,
6471 tnapi->hw_status->rx_consumer,
6472 tnapi->hw_status->rx_mini_consumer,
6473 tnapi->hw_status->idx[0].rx_producer,
6474 tnapi->hw_status->idx[0].tx_consumer);
6477 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6479 tnapi->last_tag, tnapi->last_irq_tag,
6480 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6482 tnapi->prodring.rx_std_prod_idx,
6483 tnapi->prodring.rx_std_cons_idx,
6484 tnapi->prodring.rx_jmb_prod_idx,
6485 tnapi->prodring.rx_jmb_cons_idx);
6489 /* This is called whenever we suspect that the system chipset is re-
6490 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6491 * is bogus tx completions. We try to recover by setting the
6492 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6495 static void tg3_tx_recover(struct tg3 *tp)
6497 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6498 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6500 netdev_warn(tp->dev,
6501 "The system may be re-ordering memory-mapped I/O "
6502 "cycles to the network device, attempting to recover. "
6503 "Please report the problem to the driver maintainer "
6504 "and include system chipset information.\n");
6506 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6509 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6511 /* Tell compiler to fetch tx indices from memory. */
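/* The index arithmetic below is wrap-safe because TG3_TX_RING_SIZE
 * is a power of two: e.g. with a 512-entry ring, tx_prod = 5 and
 * tx_cons = 510 give (5 - 510) & 511 = 7 descriptors in flight.
 */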
6513 return tnapi->tx_pending -
6514 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6517 /* Tigon3 never reports partial packet sends. So we do not
6518 * need special logic to handle SKBs that have not had all
6519 * of their frags sent yet, like SunGEM does.
6521 static void tg3_tx(struct tg3_napi *tnapi)
6523 struct tg3 *tp = tnapi->tp;
6524 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6525 u32 sw_idx = tnapi->tx_cons;
6526 struct netdev_queue *txq;
6527 int index = tnapi - tp->napi;
6528 unsigned int pkts_compl = 0, bytes_compl = 0;
6530 if (tg3_flag(tp, ENABLE_TSS))
6533 txq = netdev_get_tx_queue(tp->dev, index);
6535 while (sw_idx != hw_idx) {
6536 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6537 struct sk_buff *skb = ri->skb;
6540 if (unlikely(skb == NULL)) {
6545 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6546 struct skb_shared_hwtstamps timestamp;
6547 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6548 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6550 tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6552 skb_tstamp_tx(skb, &timestamp);
6555 pci_unmap_single(tp->pdev,
6556 dma_unmap_addr(ri, mapping),
6562 while (ri->fragmented) {
6563 ri->fragmented = false;
6564 sw_idx = NEXT_TX(sw_idx);
6565 ri = &tnapi->tx_buffers[sw_idx];
6568 sw_idx = NEXT_TX(sw_idx);
6570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6571 ri = &tnapi->tx_buffers[sw_idx];
6572 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6575 pci_unmap_page(tp->pdev,
6576 dma_unmap_addr(ri, mapping),
6577 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6580 while (ri->fragmented) {
6581 ri->fragmented = false;
6582 sw_idx = NEXT_TX(sw_idx);
6583 ri = &tnapi->tx_buffers[sw_idx];
6586 sw_idx = NEXT_TX(sw_idx);
6590 bytes_compl += skb->len;
6594 if (unlikely(tx_bug)) {
6600 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6602 tnapi->tx_cons = sw_idx;
6604 /* Need to make the tx_cons update visible to tg3_start_xmit()
6605 * before checking for netif_queue_stopped(). Without the
6606 * memory barrier, there is a small possibility that tg3_start_xmit()
6607 * will miss it and cause the queue to be stopped forever.
6611 if (unlikely(netif_tx_queue_stopped(txq) &&
6612 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6613 __netif_tx_lock(txq, smp_processor_id());
6614 if (netif_tx_queue_stopped(txq) &&
6615 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6616 netif_tx_wake_queue(txq);
6617 __netif_tx_unlock(txq);
6621 static void tg3_frag_free(bool is_frag, void *data)
6624 put_page(virt_to_head_page(data));
6629 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6631 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6632 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6637 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6638 map_sz, PCI_DMA_FROMDEVICE);
6639 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6644 /* Returns size of skb allocated or < 0 on error.
6646 * We only need to fill in the address because the other members
6647 * of the RX descriptor are invariant, see tg3_init_rings.
6649 * Note the purposeful asymmetry of cpu vs. chip accesses. For
6650 * posting buffers we only dirty the first cache line of the RX
6651 * descriptor (containing the address). Whereas for the RX status
6652 * buffers the cpu only reads the last cacheline of the RX descriptor
6653 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6655 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6656 u32 opaque_key, u32 dest_idx_unmasked,
6657 unsigned int *frag_size)
6659 struct tg3_rx_buffer_desc *desc;
6660 struct ring_info *map;
6663 int skb_size, data_size, dest_idx;
6665 switch (opaque_key) {
6666 case RXD_OPAQUE_RING_STD:
6667 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6668 desc = &tpr->rx_std[dest_idx];
6669 map = &tpr->rx_std_buffers[dest_idx];
6670 data_size = tp->rx_pkt_map_sz;
6673 case RXD_OPAQUE_RING_JUMBO:
6674 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6675 desc = &tpr->rx_jmb[dest_idx].std;
6676 map = &tpr->rx_jmb_buffers[dest_idx];
6677 data_size = TG3_RX_JMB_MAP_SZ;
6684 /* Do not overwrite any of the map or rp information
6685 * until we are sure we can commit to a new buffer.
6687 * Callers depend upon this behavior and assume that
6688 * we leave everything unchanged if we fail.
6690 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6691 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
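/* The buffer must hold the DMA headroom, the packet data, and the
 * skb_shared_info that build_skb() will place at the end. Buffers
 * that fit in a page come from the cheaper page-fragment
 * allocator; larger (jumbo) buffers fall back to kmalloc.
 */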
6692 if (skb_size <= PAGE_SIZE) {
6693 data = netdev_alloc_frag(skb_size);
6694 *frag_size = skb_size;
6696 data = kmalloc(skb_size, GFP_ATOMIC);
6702 mapping = pci_map_single(tp->pdev,
6703 data + TG3_RX_OFFSET(tp),
6705 PCI_DMA_FROMDEVICE);
6706 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6707 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6712 dma_unmap_addr_set(map, mapping, mapping);
6714 desc->addr_hi = ((u64)mapping >> 32);
6715 desc->addr_lo = ((u64)mapping & 0xffffffff);
6720 /* We only need to copy the address over because the other
6721 * members of the RX descriptor are invariant. See notes above
6722 * tg3_alloc_rx_data for full details.
6724 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6725 struct tg3_rx_prodring_set *dpr,
6726 u32 opaque_key, int src_idx,
6727 u32 dest_idx_unmasked)
6729 struct tg3 *tp = tnapi->tp;
6730 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6731 struct ring_info *src_map, *dest_map;
6732 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6735 switch (opaque_key) {
6736 case RXD_OPAQUE_RING_STD:
6737 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6738 dest_desc = &dpr->rx_std[dest_idx];
6739 dest_map = &dpr->rx_std_buffers[dest_idx];
6740 src_desc = &spr->rx_std[src_idx];
6741 src_map = &spr->rx_std_buffers[src_idx];
6744 case RXD_OPAQUE_RING_JUMBO:
6745 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6746 dest_desc = &dpr->rx_jmb[dest_idx].std;
6747 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6748 src_desc = &spr->rx_jmb[src_idx].std;
6749 src_map = &spr->rx_jmb_buffers[src_idx];
6756 dest_map->data = src_map->data;
6757 dma_unmap_addr_set(dest_map, mapping,
6758 dma_unmap_addr(src_map, mapping));
6759 dest_desc->addr_hi = src_desc->addr_hi;
6760 dest_desc->addr_lo = src_desc->addr_lo;
6762 /* Ensure that the update to the skb happens after the physical
6763 * addresses have been transferred to the new BD location.
6767 src_map->data = NULL;
6770 /* The RX ring scheme is composed of multiple rings which post fresh
6771 * buffers to the chip, and one special ring the chip uses to report
6772 * status back to the host.
6774 * The special ring reports the status of received packets to the
6775 * host. The chip does not write into the original descriptor the
6776 * RX buffer was obtained from. The chip simply takes the original
6777 * descriptor as provided by the host, updates the status and length
6778 * field, then writes this into the next status ring entry.
6780 * Each ring the host uses to post buffers to the chip is described
6781 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6782 * it is first placed into the on-chip ram. When the packet's length
6783 * is known, it walks down the TG3_BDINFO entries to select the ring.
6784 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6785 * which is within the range of the new packet's length is chosen.
6787 * The "separate ring for rx status" scheme may sound queer, but it makes
6788 * sense from a cache coherency perspective. If only the host writes
6789 * to the buffer post rings, and only the chip writes to the rx status
6790 * rings, then cache lines never move beyond shared-modified state.
6791 * If both the host and chip were to write into the same ring, cache line
6792 * eviction could occur since both entities want it in an exclusive state.
6794 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6796 struct tg3 *tp = tnapi->tp;
6797 u32 work_mask, rx_std_posted = 0;
6798 u32 std_prod_idx, jmb_prod_idx;
6799 u32 sw_idx = tnapi->rx_rcb_ptr;
6802 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6804 hw_idx = *(tnapi->rx_rcb_prod_idx);
6806 * We need to order the read of hw_idx and the read of
6807 * the opaque cookie.
6812 std_prod_idx = tpr->rx_std_prod_idx;
6813 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6814 while (sw_idx != hw_idx && budget > 0) {
6815 struct ring_info *ri;
6816 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6818 struct sk_buff *skb;
6819 dma_addr_t dma_addr;
6820 u32 opaque_key, desc_idx, *post_ptr;
6824 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6825 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6826 if (opaque_key == RXD_OPAQUE_RING_STD) {
6827 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6828 dma_addr = dma_unmap_addr(ri, mapping);
6830 post_ptr = &std_prod_idx;
6832 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6833 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6834 dma_addr = dma_unmap_addr(ri, mapping);
6836 post_ptr = &jmb_prod_idx;
6838 goto next_pkt_nopost;
6840 work_mask |= opaque_key;
6842 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6843 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6845 tg3_recycle_rx(tnapi, tpr, opaque_key,
6846 desc_idx, *post_ptr);
6848 /* Other statistics are kept track of by the card. */
6853 prefetch(data + TG3_RX_OFFSET(tp));
6854 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6857 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6858 RXD_FLAG_PTPSTAT_PTPV1 ||
6859 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6860 RXD_FLAG_PTPSTAT_PTPV2) {
6861 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6862 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6865 if (len > TG3_RX_COPY_THRESH(tp)) {
6867 unsigned int frag_size;
6869 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6870 *post_ptr, &frag_size);
6874 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6875 PCI_DMA_FROMDEVICE);
6877 /* Ensure that the update to the data happens
6878 * after the usage of the old DMA mapping.
6884 skb = build_skb(data, frag_size);
6886 tg3_frag_free(frag_size != 0, data);
6887 goto drop_it_no_recycle;
6889 skb_reserve(skb, TG3_RX_OFFSET(tp));
6891 tg3_recycle_rx(tnapi, tpr, opaque_key,
6892 desc_idx, *post_ptr);
6894 skb = netdev_alloc_skb(tp->dev,
6895 len + TG3_RAW_IP_ALIGN);
6897 goto drop_it_no_recycle;
6899 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6900 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6902 data + TG3_RX_OFFSET(tp),
6904 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6909 tg3_hwclock_to_timestamp(tp, tstamp,
6910 skb_hwtstamps(skb));
6912 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6913 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6914 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6915 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6916 skb->ip_summed = CHECKSUM_UNNECESSARY;
6918 skb_checksum_none_assert(skb);
6920 skb->protocol = eth_type_trans(skb, tp->dev);
6922 if (len > (tp->dev->mtu + ETH_HLEN) &&
6923 skb->protocol != htons(ETH_P_8021Q)) {
6925 goto drop_it_no_recycle;
6928 if (desc->type_flags & RXD_FLAG_VLAN &&
6929 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6930 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6931 desc->err_vlan & RXD_VLAN_MASK);
6933 napi_gro_receive(&tnapi->napi, skb);
6941 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6942 tpr->rx_std_prod_idx = std_prod_idx &
6943 tp->rx_std_ring_mask;
6944 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6945 tpr->rx_std_prod_idx);
6946 work_mask &= ~RXD_OPAQUE_RING_STD;
6951 sw_idx &= tp->rx_ret_ring_mask;
6953 /* Refresh hw_idx to see if there is new work */
6954 if (sw_idx == hw_idx) {
6955 hw_idx = *(tnapi->rx_rcb_prod_idx);
6960 /* ACK the status ring. */
6961 tnapi->rx_rcb_ptr = sw_idx;
6962 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6964 /* Refill RX ring(s). */
6965 if (!tg3_flag(tp, ENABLE_RSS)) {
6966 /* Sync BD data before updating mailbox */
6969 if (work_mask & RXD_OPAQUE_RING_STD) {
6970 tpr->rx_std_prod_idx = std_prod_idx &
6971 tp->rx_std_ring_mask;
6972 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6973 tpr->rx_std_prod_idx);
6975 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6976 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6977 tp->rx_jmb_ring_mask;
6978 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6979 tpr->rx_jmb_prod_idx);
6982 } else if (work_mask) {
6983 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6984 * updated before the producer indices can be updated.
6988 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6989 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6991 if (tnapi != &tp->napi[1]) {
6992 tp->rx_refill = true;
6993 napi_schedule(&tp->napi[1].napi);
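/* Under RSS only vector 1 replenishes the producer rings; other
 * vectors just flag the need to refill and kick vector 1's NAPI
 * rather than touching the rings themselves.
 */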
7000 static void tg3_poll_link(struct tg3 *tp)
7002 /* handle link change and other phy events */
7003 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7004 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7006 if (sblk->status & SD_STATUS_LINK_CHG) {
7007 sblk->status = SD_STATUS_UPDATED |
7008 (sblk->status & ~SD_STATUS_LINK_CHG);
7009 spin_lock(&tp->lock);
7010 if (tg3_flag(tp, USE_PHYLIB)) {
7012 (MAC_STATUS_SYNC_CHANGED |
7013 MAC_STATUS_CFG_CHANGED |
7014 MAC_STATUS_MI_COMPLETION |
7015 MAC_STATUS_LNKSTATE_CHANGED));
7018 tg3_setup_phy(tp, false);
7019 spin_unlock(&tp->lock);
7024 static int tg3_rx_prodring_xfer(struct tg3 *tp,
7025 struct tg3_rx_prodring_set *dpr,
7026 struct tg3_rx_prodring_set *spr)
7028 u32 si, di, cpycnt, src_prod_idx;
7032 src_prod_idx = spr->rx_std_prod_idx;
7034 /* Make sure updates to the rx_std_buffers[] entries and the
7035 * standard producer index are seen in the correct order.
7039 if (spr->rx_std_cons_idx == src_prod_idx)
7042 if (spr->rx_std_cons_idx < src_prod_idx)
7043 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7045 cpycnt = tp->rx_std_ring_mask + 1 -
7046 spr->rx_std_cons_idx;
7048 cpycnt = min(cpycnt,
7049 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
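/* cpycnt is clamped so neither ring wraps inside the memcpy
 * below; a transfer that crosses the end of a ring is presumably
 * completed on a later pass of the surrounding loop.
 */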
7051 si = spr->rx_std_cons_idx;
7052 di = dpr->rx_std_prod_idx;
7054 for (i = di; i < di + cpycnt; i++) {
7055 if (dpr->rx_std_buffers[i].data) {
7065 /* Ensure that updates to the rx_std_buffers ring and the
7066 * shadowed hardware producer ring from tg3_recycle_skb() are
7067 * ordered correctly WRT the skb check above.
7071 memcpy(&dpr->rx_std_buffers[di],
7072 &spr->rx_std_buffers[si],
7073 cpycnt * sizeof(struct ring_info));
7075 for (i = 0; i < cpycnt; i++, di++, si++) {
7076 struct tg3_rx_buffer_desc *sbd, *dbd;
7077 sbd = &spr->rx_std[si];
7078 dbd = &dpr->rx_std[di];
7079 dbd->addr_hi = sbd->addr_hi;
7080 dbd->addr_lo = sbd->addr_lo;
7083 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7084 tp->rx_std_ring_mask;
7085 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7086 tp->rx_std_ring_mask;
7090 src_prod_idx = spr->rx_jmb_prod_idx;
7092 /* Make sure updates to the rx_jmb_buffers[] entries and
7093 * the jumbo producer index are seen in the correct order.
7097 if (spr->rx_jmb_cons_idx == src_prod_idx)
7100 if (spr->rx_jmb_cons_idx < src_prod_idx)
7101 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7103 cpycnt = tp->rx_jmb_ring_mask + 1 -
7104 spr->rx_jmb_cons_idx;
7106 cpycnt = min(cpycnt,
7107 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7109 si = spr->rx_jmb_cons_idx;
7110 di = dpr->rx_jmb_prod_idx;
7112 for (i = di; i < di + cpycnt; i++) {
7113 if (dpr->rx_jmb_buffers[i].data) {
7123 /* Ensure that updates to the rx_jmb_buffers ring and the
7124 * shadowed hardware producer ring from tg3_recycle_skb() are
7125 * ordered correctly WRT the skb check above.
7129 memcpy(&dpr->rx_jmb_buffers[di],
7130 &spr->rx_jmb_buffers[si],
7131 cpycnt * sizeof(struct ring_info));
7133 for (i = 0; i < cpycnt; i++, di++, si++) {
7134 struct tg3_rx_buffer_desc *sbd, *dbd;
7135 sbd = &spr->rx_jmb[si].std;
7136 dbd = &dpr->rx_jmb[di].std;
7137 dbd->addr_hi = sbd->addr_hi;
7138 dbd->addr_lo = sbd->addr_lo;
7141 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7142 tp->rx_jmb_ring_mask;
7143 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7144 tp->rx_jmb_ring_mask;
7150 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7152 struct tg3 *tp = tnapi->tp;
7154 /* run TX completion thread */
7155 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7157 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7161 if (!tnapi->rx_rcb_prod_idx)
7164 /* run RX thread, within the bounds set by NAPI.
7165 * All RX "locking" is done by ensuring outside
7166 * code synchronizes with tg3->napi.poll()
7168 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7169 work_done += tg3_rx(tnapi, budget - work_done);
7171 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7172 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7174 u32 std_prod_idx = dpr->rx_std_prod_idx;
7175 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7177 tp->rx_refill = false;
7178 for (i = 1; i <= tp->rxq_cnt; i++)
7179 err |= tg3_rx_prodring_xfer(tp, dpr,
7180 &tp->napi[i].prodring);
7184 if (std_prod_idx != dpr->rx_std_prod_idx)
7185 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7186 dpr->rx_std_prod_idx);
7188 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7189 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7190 dpr->rx_jmb_prod_idx);
7195 tw32_f(HOSTCC_MODE, tp->coal_now);
7201 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7203 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7204 schedule_work(&tp->reset_task);
7207 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7209 cancel_work_sync(&tp->reset_task);
7210 tg3_flag_clear(tp, RESET_TASK_PENDING);
7211 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7214 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7216 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7217 struct tg3 *tp = tnapi->tp;
7219 struct tg3_hw_status *sblk = tnapi->hw_status;
7222 work_done = tg3_poll_work(tnapi, work_done, budget);
7224 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7227 if (unlikely(work_done >= budget))
7230 /* tp->last_tag is used in tg3_int_reenable() below
7231 * to tell the hw how much work has been processed,
7232 * so we must read it before checking for more work.
7234 tnapi->last_tag = sblk->status_tag;
7235 tnapi->last_irq_tag = tnapi->last_tag;
7238 /* check for RX/TX work to do */
7239 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7240 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7242 /* This test here is not race free, but will reduce
7243 * the number of interrupts by looping again.
7245 if (tnapi == &tp->napi[1] && tp->rx_refill)
7248 napi_complete(napi);
7249 /* Reenable interrupts. */
7250 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7252 /* This test here is synchronized by napi_schedule()
7253 * and napi_complete() to close the race condition.
7255 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7256 tw32(HOSTCC_MODE, tp->coalesce_mode |
7257 HOSTCC_MODE_ENABLE |
7268 /* work_done is guaranteed to be less than budget. */
7269 napi_complete(napi);
7270 tg3_reset_task_schedule(tp);
7274 static void tg3_process_error(struct tg3 *tp)
7277 bool real_error = false;
7279 if (tg3_flag(tp, ERROR_PROCESSED))
7282 /* Check Flow Attention register */
7283 val = tr32(HOSTCC_FLOW_ATTN);
7284 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7285 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7289 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7290 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7294 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7295 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7304 tg3_flag_set(tp, ERROR_PROCESSED);
7305 tg3_reset_task_schedule(tp);
7308 static int tg3_poll(struct napi_struct *napi, int budget)
7310 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7311 struct tg3 *tp = tnapi->tp;
7313 struct tg3_hw_status *sblk = tnapi->hw_status;
7316 if (sblk->status & SD_STATUS_ERROR)
7317 tg3_process_error(tp);
7321 work_done = tg3_poll_work(tnapi, work_done, budget);
7323 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7326 if (unlikely(work_done >= budget))
7329 if (tg3_flag(tp, TAGGED_STATUS)) {
7330 /* tp->last_tag is used in tg3_int_reenable() below
7331 * to tell the hw how much work has been processed,
7332 * so we must read it before checking for more work.
7334 tnapi->last_tag = sblk->status_tag;
7335 tnapi->last_irq_tag = tnapi->last_tag;
7338 sblk->status &= ~SD_STATUS_UPDATED;
7340 if (likely(!tg3_has_work(tnapi))) {
7341 napi_complete(napi);
7342 tg3_int_reenable(tnapi);
7350 /* work_done is guaranteed to be less than budget. */
7351 napi_complete(napi);
7352 tg3_reset_task_schedule(tp);
7356 static void tg3_napi_disable(struct tg3 *tp)
7360 for (i = tp->irq_cnt - 1; i >= 0; i--)
7361 napi_disable(&tp->napi[i].napi);
7364 static void tg3_napi_enable(struct tg3 *tp)
7368 for (i = 0; i < tp->irq_cnt; i++)
7369 napi_enable(&tp->napi[i].napi);
7372 static void tg3_napi_init(struct tg3 *tp)
7376 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7377 for (i = 1; i < tp->irq_cnt; i++)
7378 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7381 static void tg3_napi_fini(struct tg3 *tp)
7385 for (i = 0; i < tp->irq_cnt; i++)
7386 netif_napi_del(&tp->napi[i].napi);
7389 static inline void tg3_netif_stop(struct tg3 *tp)
7391 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7392 tg3_napi_disable(tp);
7393 netif_carrier_off(tp->dev);
7394 netif_tx_disable(tp->dev);
7397 /* tp->lock must be held */
7398 static inline void tg3_netif_start(struct tg3 *tp)
7402 /* NOTE: unconditional netif_tx_wake_all_queues is only
7403 * appropriate so long as all callers are assured to
7404 * have free tx slots (such as after tg3_init_hw)
7406 netif_tx_wake_all_queues(tp->dev);
7409 netif_carrier_on(tp->dev);
7411 tg3_napi_enable(tp);
7412 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7413 tg3_enable_ints(tp);
7416 static void tg3_irq_quiesce(struct tg3 *tp)
7420 BUG_ON(tp->irq_sync);
7425 for (i = 0; i < tp->irq_cnt; i++)
7426 synchronize_irq(tp->napi[i].irq_vec);
7429 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7430 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7431 * with as well. Most of the time, this is not necessary except when
7432 * shutting down the device.
7434 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7436 spin_lock_bh(&tp->lock);
7438 tg3_irq_quiesce(tp);
7441 static inline void tg3_full_unlock(struct tg3 *tp)
7443 spin_unlock_bh(&tp->lock);
7446 /* One-shot MSI handler - Chip automatically disables interrupt
7447 * after sending MSI so driver doesn't have to do it.
7449 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7451 struct tg3_napi *tnapi = dev_id;
7452 struct tg3 *tp = tnapi->tp;
7454 prefetch(tnapi->hw_status);
7456 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7458 if (likely(!tg3_irq_sync(tp)))
7459 napi_schedule(&tnapi->napi);
7464 /* MSI ISR - No need to check for interrupt sharing and no need to
7465 * flush status block and interrupt mailbox. PCI ordering rules
7466 * guarantee that MSI will arrive after the status block.
7468 static irqreturn_t tg3_msi(int irq, void *dev_id)
7470 struct tg3_napi *tnapi = dev_id;
7471 struct tg3 *tp = tnapi->tp;
7473 prefetch(tnapi->hw_status);
7475 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7477 * Writing any value to intr-mbox-0 clears PCI INTA# and
7478 * chip-internal interrupt pending events.
7479 * Writing non-zero to intr-mbox-0 additional tells the
7480 * NIC to stop sending us irqs, engaging "in-intr-handler"
7483 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7484 if (likely(!tg3_irq_sync(tp)))
7485 napi_schedule(&tnapi->napi);
7487 return IRQ_RETVAL(1);
7490 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7492 struct tg3_napi *tnapi = dev_id;
7493 struct tg3 *tp = tnapi->tp;
7494 struct tg3_hw_status *sblk = tnapi->hw_status;
7495 unsigned int handled = 1;
7497 /* In INTx mode, it is possible for the interrupt to arrive at
7498 * the CPU before the status block posted prior to the interrupt.
7499 * Reading the PCI State register will confirm whether the
7500 * interrupt is ours and will flush the status block.
7502 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7503 if (tg3_flag(tp, CHIP_RESETTING) ||
7504 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7511 * Writing any value to intr-mbox-0 clears PCI INTA# and
7512 * chip-internal interrupt pending events.
7513 * Writing non-zero to intr-mbox-0 additional tells the
7514 * NIC to stop sending us irqs, engaging "in-intr-handler"
7517 * Flush the mailbox to de-assert the IRQ immediately to prevent
7518 * spurious interrupts. The flush impacts performance but
7519 * excessive spurious interrupts can be worse in some cases.
7521 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7522 if (tg3_irq_sync(tp))
7524 sblk->status &= ~SD_STATUS_UPDATED;
7525 if (likely(tg3_has_work(tnapi))) {
7526 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7527 napi_schedule(&tnapi->napi);
7529 /* No work, shared interrupt perhaps? re-enable
7530 * interrupts, and flush that PCI write
7532 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7536 return IRQ_RETVAL(handled);
7539 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7541 struct tg3_napi *tnapi = dev_id;
7542 struct tg3 *tp = tnapi->tp;
7543 struct tg3_hw_status *sblk = tnapi->hw_status;
7544 unsigned int handled = 1;
7546 /* In INTx mode, it is possible for the interrupt to arrive at
7547 * the CPU before the status block posted prior to the interrupt.
7548 * Reading the PCI State register will confirm whether the
7549 * interrupt is ours and will flush the status block.
7551 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7552 if (tg3_flag(tp, CHIP_RESETTING) ||
7553 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7560 * writing any value to intr-mbox-0 clears PCI INTA# and
7561 * chip-internal interrupt pending events.
7562 * writing non-zero to intr-mbox-0 additional tells the
7563 * NIC to stop sending us irqs, engaging "in-intr-handler"
7566 * Flush the mailbox to de-assert the IRQ immediately to prevent
7567 * spurious interrupts. The flush impacts performance but
7568 * excessive spurious interrupts can be worse in some cases.
7570 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7573 * In a shared interrupt configuration, sometimes other devices'
7574 * interrupts will scream. We record the current status tag here
7575 * so that the above check can report that the screaming interrupts
7576 * are unhandled. Eventually they will be silenced.
7578 tnapi->last_irq_tag = sblk->status_tag;
7580 if (tg3_irq_sync(tp))
7583 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7585 napi_schedule(&tnapi->napi);
7588 return IRQ_RETVAL(handled);
7591 /* ISR for interrupt test */
7592 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7594 struct tg3_napi *tnapi = dev_id;
7595 struct tg3 *tp = tnapi->tp;
7596 struct tg3_hw_status *sblk = tnapi->hw_status;
7598 if ((sblk->status & SD_STATUS_UPDATED) ||
7599 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7600 tg3_disable_ints(tp);
7601 return IRQ_RETVAL(1);
7603 return IRQ_RETVAL(0);
7606 #ifdef CONFIG_NET_POLL_CONTROLLER
7607 static void tg3_poll_controller(struct net_device *dev)
7610 struct tg3 *tp = netdev_priv(dev);
7612 if (tg3_irq_sync(tp))
7615 for (i = 0; i < tp->irq_cnt; i++)
7616 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7620 static void tg3_tx_timeout(struct net_device *dev)
7622 struct tg3 *tp = netdev_priv(dev);
7624 if (netif_msg_tx_err(tp)) {
7625 netdev_err(dev, "transmit timed out, resetting\n");
7629 tg3_reset_task_schedule(tp);
7632 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7633 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7635 u32 base = (u32) mapping & 0xffffffff;
7637 return (base > 0xffffdcc0) && (base + len + 8 < base);
7640 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7641 * of any 4GB boundaries: 4G, 8G, etc
7643 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7646 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7647 u32 base = (u32) mapping & 0xffffffff;
7649 return ((base + len + (mss & 0x3fff)) < base);
7654 /* Test for DMA addresses > 40-bit */
7655 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7658 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7659 if (tg3_flag(tp, 40BIT_DMA_BUG))
7660 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7667 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7668 dma_addr_t mapping, u32 len, u32 flags,
7671 txbd->addr_hi = ((u64) mapping >> 32);
7672 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7673 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7674 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7677 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7678 dma_addr_t map, u32 len, u32 flags,
7681 struct tg3 *tp = tnapi->tp;
7684 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7687 if (tg3_4g_overflow_test(map, len))
7690 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7693 if (tg3_40bit_overflow_test(tp, map, len))
7696 if (tp->dma_limit) {
7697 u32 prvidx = *entry;
7698 u32 tmp_flag = flags & ~TXD_FLAG_END;
7699 while (len > tp->dma_limit && *budget) {
7700 u32 frag_len = tp->dma_limit;
7701 len -= tp->dma_limit;
7703 /* Avoid the 8byte DMA problem */
7705 len += tp->dma_limit / 2;
7706 frag_len = tp->dma_limit / 2;
7709 tnapi->tx_buffers[*entry].fragmented = true;
7711 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7712 frag_len, tmp_flag, mss, vlan);
7715 *entry = NEXT_TX(*entry);
7722 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7723 len, flags, mss, vlan);
7725 *entry = NEXT_TX(*entry);
7728 tnapi->tx_buffers[prvidx].fragmented = false;
7732 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7733 len, flags, mss, vlan);
7734 *entry = NEXT_TX(*entry);
7740 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7743 struct sk_buff *skb;
7744 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7749 pci_unmap_single(tnapi->tp->pdev,
7750 dma_unmap_addr(txb, mapping),
7754 while (txb->fragmented) {
7755 txb->fragmented = false;
7756 entry = NEXT_TX(entry);
7757 txb = &tnapi->tx_buffers[entry];
7760 for (i = 0; i <= last; i++) {
7761 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7763 entry = NEXT_TX(entry);
7764 txb = &tnapi->tx_buffers[entry];
7766 pci_unmap_page(tnapi->tp->pdev,
7767 dma_unmap_addr(txb, mapping),
7768 skb_frag_size(frag), PCI_DMA_TODEVICE);
7770 while (txb->fragmented) {
7771 txb->fragmented = false;
7772 entry = NEXT_TX(entry);
7773 txb = &tnapi->tx_buffers[entry];
7778 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7779 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7780 struct sk_buff **pskb,
7781 u32 *entry, u32 *budget,
7782 u32 base_flags, u32 mss, u32 vlan)
7784 struct tg3 *tp = tnapi->tp;
7785 struct sk_buff *new_skb, *skb = *pskb;
7786 dma_addr_t new_addr = 0;
7789 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7790 new_skb = skb_copy(skb, GFP_ATOMIC);
7792 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7794 new_skb = skb_copy_expand(skb,
7795 skb_headroom(skb) + more_headroom,
7796 skb_tailroom(skb), GFP_ATOMIC);
7802 /* New SKB is guaranteed to be linear. */
7803 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7805 /* Make sure the mapping succeeded */
7806 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7807 dev_kfree_skb(new_skb);
7810 u32 save_entry = *entry;
7812 base_flags |= TXD_FLAG_END;
7814 tnapi->tx_buffers[*entry].skb = new_skb;
7815 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7818 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7819 new_skb->len, base_flags,
7821 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7822 dev_kfree_skb(new_skb);
7833 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7835 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7836 * TSO header is greater than 80 bytes.
7838 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7840 struct sk_buff *segs, *nskb;
7841 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7843 /* Estimate the number of fragments in the worst case */
7844 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7845 netif_stop_queue(tp->dev);
7847 /* netif_tx_stop_queue() must be done before checking
7848 * checking tx index in tg3_tx_avail() below, because in
7849 * tg3_tx(), we update tx index before checking for
7850 * netif_tx_queue_stopped().
7853 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7854 return NETDEV_TX_BUSY;
7856 netif_wake_queue(tp->dev);
7859 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7861 goto tg3_tso_bug_end;
7867 tg3_start_xmit(nskb, tp->dev);
7873 return NETDEV_TX_OK;
7876 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7877 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7879 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7881 struct tg3 *tp = netdev_priv(dev);
7882 u32 len, entry, base_flags, mss, vlan = 0;
7884 int i = -1, would_hit_hwbug;
7886 struct tg3_napi *tnapi;
7887 struct netdev_queue *txq;
7890 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7891 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7892 if (tg3_flag(tp, ENABLE_TSS))
7895 budget = tg3_tx_avail(tnapi);
7897 /* We are running in BH disabled context with netif_tx_lock
7898 * and TX reclaim runs via tp->napi.poll inside of a software
7899 * interrupt. Furthermore, IRQ processing runs lockless so we have
7900 * no IRQ context deadlocks to worry about either. Rejoice!
7902 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7903 if (!netif_tx_queue_stopped(txq)) {
7904 netif_tx_stop_queue(txq);
7906 /* This is a hard error, log it. */
7908 "BUG! Tx Ring full when queue awake!\n");
7910 return NETDEV_TX_BUSY;
7913 entry = tnapi->tx_prod;
7915 if (skb->ip_summed == CHECKSUM_PARTIAL)
7916 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7918 mss = skb_shinfo(skb)->gso_size;
7921 u32 tcp_opt_len, hdr_len;
7923 if (skb_header_cloned(skb) &&
7924 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7928 tcp_opt_len = tcp_optlen(skb);
7930 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7932 if (!skb_is_gso_v6(skb)) {
7934 iph->tot_len = htons(mss + hdr_len);
7937 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7938 tg3_flag(tp, TSO_BUG))
7939 return tg3_tso_bug(tp, skb);
7941 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7942 TXD_FLAG_CPU_POST_DMA);
7944 if (tg3_flag(tp, HW_TSO_1) ||
7945 tg3_flag(tp, HW_TSO_2) ||
7946 tg3_flag(tp, HW_TSO_3)) {
7947 tcp_hdr(skb)->check = 0;
7948 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7950 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7955 if (tg3_flag(tp, HW_TSO_3)) {
7956 mss |= (hdr_len & 0xc) << 12;
7958 base_flags |= 0x00000010;
7959 base_flags |= (hdr_len & 0x3e0) << 5;
7960 } else if (tg3_flag(tp, HW_TSO_2))
7961 mss |= hdr_len << 9;
7962 else if (tg3_flag(tp, HW_TSO_1) ||
7963 tg3_asic_rev(tp) == ASIC_REV_5705) {
7964 if (tcp_opt_len || iph->ihl > 5) {
7967 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7968 mss |= (tsflags << 11);
7971 if (tcp_opt_len || iph->ihl > 5) {
7974 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7975 base_flags |= tsflags << 12;
7980 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7981 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7982 base_flags |= TXD_FLAG_JMB_PKT;
7984 if (vlan_tx_tag_present(skb)) {
7985 base_flags |= TXD_FLAG_VLAN;
7986 vlan = vlan_tx_tag_get(skb);
7989 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7990 tg3_flag(tp, TX_TSTAMP_EN)) {
7991 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7992 base_flags |= TXD_FLAG_HWTSTAMP;
7995 len = skb_headlen(skb);
7997 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7998 if (pci_dma_mapping_error(tp->pdev, mapping))
8002 tnapi->tx_buffers[entry].skb = skb;
8003 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8005 would_hit_hwbug = 0;
8007 if (tg3_flag(tp, 5701_DMA_BUG))
8008 would_hit_hwbug = 1;
8010 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8011 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8013 would_hit_hwbug = 1;
8014 } else if (skb_shinfo(skb)->nr_frags > 0) {
8017 if (!tg3_flag(tp, HW_TSO_1) &&
8018 !tg3_flag(tp, HW_TSO_2) &&
8019 !tg3_flag(tp, HW_TSO_3))
8022 /* Now loop through additional data
8023 * fragments, and queue them.
8025 last = skb_shinfo(skb)->nr_frags - 1;
8026 for (i = 0; i <= last; i++) {
8027 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8029 len = skb_frag_size(frag);
8030 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8031 len, DMA_TO_DEVICE);
8033 tnapi->tx_buffers[entry].skb = NULL;
8034 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8036 if (dma_mapping_error(&tp->pdev->dev, mapping))
8040 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8042 ((i == last) ? TXD_FLAG_END : 0),
8044 would_hit_hwbug = 1;
8050 if (would_hit_hwbug) {
8051 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8053 /* If the workaround fails due to memory/mapping
8054 * failure, silently drop this packet.
8056 entry = tnapi->tx_prod;
8057 budget = tg3_tx_avail(tnapi);
8058 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8059 base_flags, mss, vlan))
8063 skb_tx_timestamp(skb);
8064 netdev_tx_sent_queue(txq, skb->len);
8066 /* Sync BD data before updating mailbox */
8069 /* Packets are ready, update Tx producer idx local and on card. */
8070 tw32_tx_mbox(tnapi->prodmbox, entry);
8072 tnapi->tx_prod = entry;
8073 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8074 netif_tx_stop_queue(txq);
8076 /* netif_tx_stop_queue() must be done before checking
8077 * checking tx index in tg3_tx_avail() below, because in
8078 * tg3_tx(), we update tx index before checking for
8079 * netif_tx_queue_stopped().
8082 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8083 netif_tx_wake_queue(txq);
8087 return NETDEV_TX_OK;
8090 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8091 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8096 return NETDEV_TX_OK;
8099 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8102 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8103 MAC_MODE_PORT_MODE_MASK);
8105 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8107 if (!tg3_flag(tp, 5705_PLUS))
8108 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8110 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8111 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8113 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8115 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8117 if (tg3_flag(tp, 5705_PLUS) ||
8118 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8119 tg3_asic_rev(tp) == ASIC_REV_5700)
8120 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8123 tw32(MAC_MODE, tp->mac_mode);
8127 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8129 u32 val, bmcr, mac_mode, ptest = 0;
8131 tg3_phy_toggle_apd(tp, false);
8132 tg3_phy_toggle_automdix(tp, false);
8134 if (extlpbk && tg3_phy_set_extloopbk(tp))
8137 bmcr = BMCR_FULLDPLX;
8142 bmcr |= BMCR_SPEED100;
8146 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8148 bmcr |= BMCR_SPEED100;
8151 bmcr |= BMCR_SPEED1000;
8156 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8157 tg3_readphy(tp, MII_CTRL1000, &val);
8158 val |= CTL1000_AS_MASTER |
8159 CTL1000_ENABLE_MASTER;
8160 tg3_writephy(tp, MII_CTRL1000, val);
8162 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8163 MII_TG3_FET_PTEST_TRIM_2;
8164 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8167 bmcr |= BMCR_LOOPBACK;
8169 tg3_writephy(tp, MII_BMCR, bmcr);
8171 /* The write needs to be flushed for the FETs */
8172 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8173 tg3_readphy(tp, MII_BMCR, &bmcr);
8177 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8178 tg3_asic_rev(tp) == ASIC_REV_5785) {
8179 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8180 MII_TG3_FET_PTEST_FRC_TX_LINK |
8181 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8183 /* The write needs to be flushed for the AC131 */
8184 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8187 /* Reset to prevent losing 1st rx packet intermittently */
8188 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8189 tg3_flag(tp, 5780_CLASS)) {
8190 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8192 tw32_f(MAC_RX_MODE, tp->rx_mode);
8195 mac_mode = tp->mac_mode &
8196 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8197 if (speed == SPEED_1000)
8198 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8200 mac_mode |= MAC_MODE_PORT_MODE_MII;
8202 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8203 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8205 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8206 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8207 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8208 mac_mode |= MAC_MODE_LINK_POLARITY;
8210 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8211 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8214 tw32(MAC_MODE, mac_mode);
8220 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8222 struct tg3 *tp = netdev_priv(dev);
8224 if (features & NETIF_F_LOOPBACK) {
8225 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8228 spin_lock_bh(&tp->lock);
8229 tg3_mac_loopback(tp, true);
8230 netif_carrier_on(tp->dev);
8231 spin_unlock_bh(&tp->lock);
8232 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8234 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8237 spin_lock_bh(&tp->lock);
8238 tg3_mac_loopback(tp, false);
8239 /* Force link status check */
8240 tg3_setup_phy(tp, true);
8241 spin_unlock_bh(&tp->lock);
8242 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8246 static netdev_features_t tg3_fix_features(struct net_device *dev,
8247 netdev_features_t features)
8249 struct tg3 *tp = netdev_priv(dev);
8251 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8252 features &= ~NETIF_F_ALL_TSO;
8257 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8259 netdev_features_t changed = dev->features ^ features;
8261 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8262 tg3_set_loopback(dev, features);
8267 static void tg3_rx_prodring_free(struct tg3 *tp,
8268 struct tg3_rx_prodring_set *tpr)
8272 if (tpr != &tp->napi[0].prodring) {
8273 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8274 i = (i + 1) & tp->rx_std_ring_mask)
8275 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8278 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8279 for (i = tpr->rx_jmb_cons_idx;
8280 i != tpr->rx_jmb_prod_idx;
8281 i = (i + 1) & tp->rx_jmb_ring_mask) {
8282 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8290 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8291 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8294 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8295 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8296 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8301 /* Initialize rx rings for packet processing.
8303 * The chip has been shut down and the driver detached from
8304 * the networking, so no interrupts or new tx packets will
8305 * end up in the driver. tp->{tx,}lock are held and thus
8308 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8309 struct tg3_rx_prodring_set *tpr)
8311 u32 i, rx_pkt_dma_sz;
8313 tpr->rx_std_cons_idx = 0;
8314 tpr->rx_std_prod_idx = 0;
8315 tpr->rx_jmb_cons_idx = 0;
8316 tpr->rx_jmb_prod_idx = 0;
8318 if (tpr != &tp->napi[0].prodring) {
8319 memset(&tpr->rx_std_buffers[0], 0,
8320 TG3_RX_STD_BUFF_RING_SIZE(tp));
8321 if (tpr->rx_jmb_buffers)
8322 memset(&tpr->rx_jmb_buffers[0], 0,
8323 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8327 /* Zero out all descriptors. */
8328 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8330 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8331 if (tg3_flag(tp, 5780_CLASS) &&
8332 tp->dev->mtu > ETH_DATA_LEN)
8333 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8334 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8336 /* Initialize invariants of the rings, we only set this
8337 * stuff once. This works because the card does not
8338 * write into the rx buffer posting rings.
8340 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8341 struct tg3_rx_buffer_desc *rxd;
8343 rxd = &tpr->rx_std[i];
8344 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8345 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8346 rxd->opaque = (RXD_OPAQUE_RING_STD |
8347 (i << RXD_OPAQUE_INDEX_SHIFT));
8350 /* Now allocate fresh SKBs for each rx ring. */
8351 for (i = 0; i < tp->rx_pending; i++) {
8352 unsigned int frag_size;
8354 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8356 netdev_warn(tp->dev,
8357 "Using a smaller RX standard ring. Only "
8358 "%d out of %d buffers were allocated "
8359 "successfully\n", i, tp->rx_pending);
8367 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8370 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8372 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8375 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8376 struct tg3_rx_buffer_desc *rxd;
8378 rxd = &tpr->rx_jmb[i].std;
8379 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8380 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8382 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8383 (i << RXD_OPAQUE_INDEX_SHIFT));
8386 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8387 unsigned int frag_size;
8389 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8391 netdev_warn(tp->dev,
8392 "Using a smaller RX jumbo ring. Only %d "
8393 "out of %d buffers were allocated "
8394 "successfully\n", i, tp->rx_jumbo_pending);
8397 tp->rx_jumbo_pending = i;
8406 tg3_rx_prodring_free(tp, tpr);
8410 static void tg3_rx_prodring_fini(struct tg3 *tp,
8411 struct tg3_rx_prodring_set *tpr)
8413 kfree(tpr->rx_std_buffers);
8414 tpr->rx_std_buffers = NULL;
8415 kfree(tpr->rx_jmb_buffers);
8416 tpr->rx_jmb_buffers = NULL;
8418 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8419 tpr->rx_std, tpr->rx_std_mapping);
8423 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8424 tpr->rx_jmb, tpr->rx_jmb_mapping);
8429 static int tg3_rx_prodring_init(struct tg3 *tp,
8430 struct tg3_rx_prodring_set *tpr)
8432 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8434 if (!tpr->rx_std_buffers)
8437 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8438 TG3_RX_STD_RING_BYTES(tp),
8439 &tpr->rx_std_mapping,
8444 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8445 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8447 if (!tpr->rx_jmb_buffers)
8450 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8451 TG3_RX_JMB_RING_BYTES(tp),
8452 &tpr->rx_jmb_mapping,
8461 tg3_rx_prodring_fini(tp, tpr);
8465 /* Free up pending packets in all rx/tx rings.
8467 * The chip has been shut down and the driver detached from
8468 * the networking, so no interrupts or new tx packets will
8469 * end up in the driver. tp->{tx,}lock is not held and we are not
8470 * in an interrupt context and thus may sleep.
8472 static void tg3_free_rings(struct tg3 *tp)
8476 for (j = 0; j < tp->irq_cnt; j++) {
8477 struct tg3_napi *tnapi = &tp->napi[j];
8479 tg3_rx_prodring_free(tp, &tnapi->prodring);
8481 if (!tnapi->tx_buffers)
8484 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8485 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8490 tg3_tx_skb_unmap(tnapi, i,
8491 skb_shinfo(skb)->nr_frags - 1);
8493 dev_kfree_skb_any(skb);
8495 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8499 /* Initialize tx/rx rings for packet processing.
8501 * The chip has been shut down and the driver detached from
8502 * the networking, so no interrupts or new tx packets will
8503 * end up in the driver. tp->{tx,}lock are held and thus
8506 static int tg3_init_rings(struct tg3 *tp)
8510 /* Free up all the SKBs. */
8513 for (i = 0; i < tp->irq_cnt; i++) {
8514 struct tg3_napi *tnapi = &tp->napi[i];
8516 tnapi->last_tag = 0;
8517 tnapi->last_irq_tag = 0;
8518 tnapi->hw_status->status = 0;
8519 tnapi->hw_status->status_tag = 0;
8520 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8525 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8527 tnapi->rx_rcb_ptr = 0;
8529 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8531 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8540 static void tg3_mem_tx_release(struct tg3 *tp)
8544 for (i = 0; i < tp->irq_max; i++) {
8545 struct tg3_napi *tnapi = &tp->napi[i];
8547 if (tnapi->tx_ring) {
8548 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8549 tnapi->tx_ring, tnapi->tx_desc_mapping);
8550 tnapi->tx_ring = NULL;
8553 kfree(tnapi->tx_buffers);
8554 tnapi->tx_buffers = NULL;
8558 static int tg3_mem_tx_acquire(struct tg3 *tp)
8561 struct tg3_napi *tnapi = &tp->napi[0];
8563 /* If multivector TSS is enabled, vector 0 does not handle
8564 * tx interrupts. Don't allocate any resources for it.
8566 if (tg3_flag(tp, ENABLE_TSS))
8569 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8570 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8571 TG3_TX_RING_SIZE, GFP_KERNEL);
8572 if (!tnapi->tx_buffers)
8575 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8577 &tnapi->tx_desc_mapping,
8579 if (!tnapi->tx_ring)
8586 tg3_mem_tx_release(tp);
8590 static void tg3_mem_rx_release(struct tg3 *tp)
8594 for (i = 0; i < tp->irq_max; i++) {
8595 struct tg3_napi *tnapi = &tp->napi[i];
8597 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8602 dma_free_coherent(&tp->pdev->dev,
8603 TG3_RX_RCB_RING_BYTES(tp),
8605 tnapi->rx_rcb_mapping);
8606 tnapi->rx_rcb = NULL;
8610 static int tg3_mem_rx_acquire(struct tg3 *tp)
8612 unsigned int i, limit;
8614 limit = tp->rxq_cnt;
8616 /* If RSS is enabled, we need a (dummy) producer ring
8617 * set on vector zero. This is the true hw prodring.
8619 if (tg3_flag(tp, ENABLE_RSS))
8622 for (i = 0; i < limit; i++) {
8623 struct tg3_napi *tnapi = &tp->napi[i];
8625 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8628 /* If multivector RSS is enabled, vector 0
8629 * does not handle rx or tx interrupts.
8630 * Don't allocate any resources for it.
8632 if (!i && tg3_flag(tp, ENABLE_RSS))
8635 tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8636 TG3_RX_RCB_RING_BYTES(tp),
8637 &tnapi->rx_rcb_mapping,
8646 tg3_mem_rx_release(tp);
8651 * Must not be invoked with interrupt sources disabled and
8652 * the hardware shutdown down.
8654 static void tg3_free_consistent(struct tg3 *tp)
8658 for (i = 0; i < tp->irq_cnt; i++) {
8659 struct tg3_napi *tnapi = &tp->napi[i];
8661 if (tnapi->hw_status) {
8662 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8664 tnapi->status_mapping);
8665 tnapi->hw_status = NULL;
8669 tg3_mem_rx_release(tp);
8670 tg3_mem_tx_release(tp);
8673 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8674 tp->hw_stats, tp->stats_mapping);
8675 tp->hw_stats = NULL;
8680 * Must not be invoked with interrupt sources disabled and
8681 * the hardware shutdown down. Can sleep.
8683 static int tg3_alloc_consistent(struct tg3 *tp)
8687 tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8688 sizeof(struct tg3_hw_stats),
8689 &tp->stats_mapping, GFP_KERNEL);
8693 for (i = 0; i < tp->irq_cnt; i++) {
8694 struct tg3_napi *tnapi = &tp->napi[i];
8695 struct tg3_hw_status *sblk;
8697 tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8699 &tnapi->status_mapping,
8701 if (!tnapi->hw_status)
8704 sblk = tnapi->hw_status;
8706 if (tg3_flag(tp, ENABLE_RSS)) {
8707 u16 *prodptr = NULL;
8710 * When RSS is enabled, the status block format changes
8711 * slightly. The "rx_jumbo_consumer", "reserved",
8712 * and "rx_mini_consumer" members get mapped to the
8713 * other three rx return ring producer indexes.
8717 prodptr = &sblk->idx[0].rx_producer;
8720 prodptr = &sblk->rx_jumbo_consumer;
8723 prodptr = &sblk->reserved;
8726 prodptr = &sblk->rx_mini_consumer;
8729 tnapi->rx_rcb_prod_idx = prodptr;
8731 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8735 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8741 tg3_free_consistent(tp);
8745 #define MAX_WAIT_CNT 1000
8747 /* To stop a block, clear the enable bit and poll till it
8748 * clears. tp->lock is held.
8750 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8755 if (tg3_flag(tp, 5705_PLUS)) {
8762 /* We can't enable/disable these bits of the
8763 * 5705/5750, just say success.
8776 for (i = 0; i < MAX_WAIT_CNT; i++) {
8777 if (pci_channel_offline(tp->pdev)) {
8778 dev_err(&tp->pdev->dev,
8779 "tg3_stop_block device offline, "
8780 "ofs=%lx enable_bit=%x\n",
8787 if ((val & enable_bit) == 0)
8791 if (i == MAX_WAIT_CNT && !silent) {
8792 dev_err(&tp->pdev->dev,
8793 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8801 /* tp->lock is held. */
8802 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8806 tg3_disable_ints(tp);
8808 if (pci_channel_offline(tp->pdev)) {
8809 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8810 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8815 tp->rx_mode &= ~RX_MODE_ENABLE;
8816 tw32_f(MAC_RX_MODE, tp->rx_mode);
8819 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8820 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8821 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8822 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8823 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8824 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8826 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8827 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8828 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8829 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8830 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8831 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8832 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8834 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8835 tw32_f(MAC_MODE, tp->mac_mode);
8838 tp->tx_mode &= ~TX_MODE_ENABLE;
8839 tw32_f(MAC_TX_MODE, tp->tx_mode);
8841 for (i = 0; i < MAX_WAIT_CNT; i++) {
8843 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8846 if (i >= MAX_WAIT_CNT) {
8847 dev_err(&tp->pdev->dev,
8848 "%s timed out, TX_MODE_ENABLE will not clear "
8849 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8853 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8854 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8855 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8857 tw32(FTQ_RESET, 0xffffffff);
8858 tw32(FTQ_RESET, 0x00000000);
8860 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8861 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8864 for (i = 0; i < tp->irq_cnt; i++) {
8865 struct tg3_napi *tnapi = &tp->napi[i];
8866 if (tnapi->hw_status)
8867 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8873 /* Save PCI command register before chip reset */
8874 static void tg3_save_pci_state(struct tg3 *tp)
8876 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8879 /* Restore PCI state after chip reset */
8880 static void tg3_restore_pci_state(struct tg3 *tp)
8884 /* Re-enable indirect register accesses. */
8885 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8886 tp->misc_host_ctrl);
8888 /* Set MAX PCI retry to zero. */
8889 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8890 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8891 tg3_flag(tp, PCIX_MODE))
8892 val |= PCISTATE_RETRY_SAME_DMA;
8893 /* Allow reads and writes to the APE register and memory space. */
8894 if (tg3_flag(tp, ENABLE_APE))
8895 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8896 PCISTATE_ALLOW_APE_SHMEM_WR |
8897 PCISTATE_ALLOW_APE_PSPACE_WR;
8898 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8900 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8902 if (!tg3_flag(tp, PCI_EXPRESS)) {
8903 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8904 tp->pci_cacheline_sz);
8905 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8909 /* Make sure PCI-X relaxed ordering bit is clear. */
8910 if (tg3_flag(tp, PCIX_MODE)) {
8913 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8915 pcix_cmd &= ~PCI_X_CMD_ERO;
8916 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8920 if (tg3_flag(tp, 5780_CLASS)) {
8922 /* Chip reset on 5780 will reset MSI enable bit,
8923 * so need to restore it.
8925 if (tg3_flag(tp, USING_MSI)) {
8928 pci_read_config_word(tp->pdev,
8929 tp->msi_cap + PCI_MSI_FLAGS,
8931 pci_write_config_word(tp->pdev,
8932 tp->msi_cap + PCI_MSI_FLAGS,
8933 ctrl | PCI_MSI_FLAGS_ENABLE);
8934 val = tr32(MSGINT_MODE);
8935 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8940 /* tp->lock is held. */
8941 static int tg3_chip_reset(struct tg3 *tp)
8944 void (*write_op)(struct tg3 *, u32, u32);
8947 if (!pci_device_is_present(tp->pdev))
8952 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8954 /* No matching tg3_nvram_unlock() after this because
8955 * chip reset below will undo the nvram lock.
8957 tp->nvram_lock_cnt = 0;
8959 /* GRC_MISC_CFG core clock reset will clear the memory
8960 * enable bit in PCI register 4 and the MSI enable bit
8961 * on some chips, so we save relevant registers here.
8963 tg3_save_pci_state(tp);
8965 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8966 tg3_flag(tp, 5755_PLUS))
8967 tw32(GRC_FASTBOOT_PC, 0);
8970 * We must avoid the readl() that normally takes place.
8971 * It locks machines, causes machine checks, and other
8972 * fun things. So, temporarily disable the 5701
8973 * hardware workaround, while we do the reset.
8975 write_op = tp->write32;
8976 if (write_op == tg3_write_flush_reg32)
8977 tp->write32 = tg3_write32;
8979 /* Prevent the irq handler from reading or writing PCI registers
8980 * during chip reset when the memory enable bit in the PCI command
8981 * register may be cleared. The chip does not generate interrupt
8982 * at this time, but the irq handler may still be called due to irq
8983 * sharing or irqpoll.
8985 tg3_flag_set(tp, CHIP_RESETTING);
8986 for (i = 0; i < tp->irq_cnt; i++) {
8987 struct tg3_napi *tnapi = &tp->napi[i];
8988 if (tnapi->hw_status) {
8989 tnapi->hw_status->status = 0;
8990 tnapi->hw_status->status_tag = 0;
8992 tnapi->last_tag = 0;
8993 tnapi->last_irq_tag = 0;
8997 for (i = 0; i < tp->irq_cnt; i++)
8998 synchronize_irq(tp->napi[i].irq_vec);
9000 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9001 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9002 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9006 val = GRC_MISC_CFG_CORECLK_RESET;
9008 if (tg3_flag(tp, PCI_EXPRESS)) {
9009 /* Force PCIe 1.0a mode */
9010 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9011 !tg3_flag(tp, 57765_PLUS) &&
9012 tr32(TG3_PCIE_PHY_TSTCTL) ==
9013 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9014 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9016 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9017 tw32(GRC_MISC_CFG, (1 << 29));
9022 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9023 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9024 tw32(GRC_VCPU_EXT_CTRL,
9025 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9028 /* Manage gphy power for all CPMU absent PCIe devices. */
9029 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9030 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9032 tw32(GRC_MISC_CFG, val);
9034 /* restore 5701 hardware bug workaround write method */
9035 tp->write32 = write_op;
9037 /* Unfortunately, we have to delay before the PCI read back.
9038 * Some 575X chips even will not respond to a PCI cfg access
9039 * when the reset command is given to the chip.
9041 * How do these hardware designers expect things to work
9042 * properly if the PCI write is posted for a long period
9043 * of time? It is always necessary to have some method by
9044 * which a register read back can occur to push the write
9045 * out which does the reset.
9047 * For most tg3 variants the trick below was working.
9052 /* Flush PCI posted writes. The normal MMIO registers
9053 * are inaccessible at this time so this is the only
9054 * way to make this reliably (actually, this is no longer
9055 * the case, see above). I tried to use indirect
9056 * register read/write but this upset some 5701 variants.
9058 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9062 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9065 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9069 /* Wait for link training to complete. */
9070 for (j = 0; j < 5000; j++)
9073 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9074 pci_write_config_dword(tp->pdev, 0xc4,
9075 cfg_val | (1 << 15));
9078 /* Clear the "no snoop" and "relaxed ordering" bits. */
9079 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9081 * Older PCIe devices only support the 128 byte
9082 * MPS setting. Enforce the restriction.
9084 if (!tg3_flag(tp, CPMU_PRESENT))
9085 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9086 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9088 /* Clear error status */
9089 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9090 PCI_EXP_DEVSTA_CED |
9091 PCI_EXP_DEVSTA_NFED |
9092 PCI_EXP_DEVSTA_FED |
9093 PCI_EXP_DEVSTA_URD);
9096 tg3_restore_pci_state(tp);
9098 tg3_flag_clear(tp, CHIP_RESETTING);
9099 tg3_flag_clear(tp, ERROR_PROCESSED);
9102 if (tg3_flag(tp, 5780_CLASS))
9103 val = tr32(MEMARB_MODE);
9104 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9106 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9108 tw32(0x5000, 0x400);
9111 if (tg3_flag(tp, IS_SSB_CORE)) {
9113 * BCM4785: In order to avoid repercussions from using
9114 * potentially defective internal ROM, stop the Rx RISC CPU,
9115 * which is not required.
9118 tg3_halt_cpu(tp, RX_CPU_BASE);
9121 err = tg3_poll_fw(tp);
9125 tw32(GRC_MODE, tp->grc_mode);
9127 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9130 tw32(0xc4, val | (1 << 15));
9133 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9134 tg3_asic_rev(tp) == ASIC_REV_5705) {
9135 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9136 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9137 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9138 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9141 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9142 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9144 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9145 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9150 tw32_f(MAC_MODE, val);
9153 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9157 if (tg3_flag(tp, PCI_EXPRESS) &&
9158 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9159 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9160 !tg3_flag(tp, 57765_PLUS)) {
9163 tw32(0x7c00, val | (1 << 25));
9166 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9167 val = tr32(TG3_CPMU_CLCK_ORIDE);
9168 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9171 /* Reprobe ASF enable state. */
9172 tg3_flag_clear(tp, ENABLE_ASF);
9173 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9174 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9176 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9177 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9178 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9181 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9182 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9183 tg3_flag_set(tp, ENABLE_ASF);
9184 tp->last_event_jiffies = jiffies;
9185 if (tg3_flag(tp, 5750_PLUS))
9186 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9188 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9189 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9190 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9191 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9192 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9199 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9200 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9202 /* tp->lock is held. */
9203 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9209 tg3_write_sig_pre_reset(tp, kind);
9211 tg3_abort_hw(tp, silent);
9212 err = tg3_chip_reset(tp);
9214 __tg3_set_mac_addr(tp, false);
9216 tg3_write_sig_legacy(tp, kind);
9217 tg3_write_sig_post_reset(tp, kind);
9220 /* Save the stats across chip resets... */
9221 tg3_get_nstats(tp, &tp->net_stats_prev);
9222 tg3_get_estats(tp, &tp->estats_prev);
9224 /* And make sure the next sample is new data */
9225 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9231 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9233 struct tg3 *tp = netdev_priv(dev);
9234 struct sockaddr *addr = p;
9236 bool skip_mac_1 = false;
9238 if (!is_valid_ether_addr(addr->sa_data))
9239 return -EADDRNOTAVAIL;
9241 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9243 if (!netif_running(dev))
9246 if (tg3_flag(tp, ENABLE_ASF)) {
9247 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9249 addr0_high = tr32(MAC_ADDR_0_HIGH);
9250 addr0_low = tr32(MAC_ADDR_0_LOW);
9251 addr1_high = tr32(MAC_ADDR_1_HIGH);
9252 addr1_low = tr32(MAC_ADDR_1_LOW);
9254 /* Skip MAC addr 1 if ASF is using it. */
9255 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9256 !(addr1_high == 0 && addr1_low == 0))
9259 spin_lock_bh(&tp->lock);
9260 __tg3_set_mac_addr(tp, skip_mac_1);
9261 spin_unlock_bh(&tp->lock);
9266 /* tp->lock is held. */
9267 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9268 dma_addr_t mapping, u32 maxlen_flags,
9272 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9273 ((u64) mapping >> 32));
9275 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9276 ((u64) mapping & 0xffffffff));
9278 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9281 if (!tg3_flag(tp, 5705_PLUS))
9283 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9288 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9292 if (!tg3_flag(tp, ENABLE_TSS)) {
9293 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9294 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9295 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9297 tw32(HOSTCC_TXCOL_TICKS, 0);
9298 tw32(HOSTCC_TXMAX_FRAMES, 0);
9299 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9301 for (; i < tp->txq_cnt; i++) {
9304 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9305 tw32(reg, ec->tx_coalesce_usecs);
9306 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9307 tw32(reg, ec->tx_max_coalesced_frames);
9308 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9309 tw32(reg, ec->tx_max_coalesced_frames_irq);
9313 for (; i < tp->irq_max - 1; i++) {
9314 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9315 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9316 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9320 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9323 u32 limit = tp->rxq_cnt;
9325 if (!tg3_flag(tp, ENABLE_RSS)) {
9326 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9327 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9328 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9331 tw32(HOSTCC_RXCOL_TICKS, 0);
9332 tw32(HOSTCC_RXMAX_FRAMES, 0);
9333 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9336 for (; i < limit; i++) {
9339 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9340 tw32(reg, ec->rx_coalesce_usecs);
9341 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9342 tw32(reg, ec->rx_max_coalesced_frames);
9343 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9344 tw32(reg, ec->rx_max_coalesced_frames_irq);
9347 for (; i < tp->irq_max - 1; i++) {
9348 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9349 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9350 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9354 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9356 tg3_coal_tx_init(tp, ec);
9357 tg3_coal_rx_init(tp, ec);
9359 if (!tg3_flag(tp, 5705_PLUS)) {
9360 u32 val = ec->stats_block_coalesce_usecs;
9362 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9363 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9368 tw32(HOSTCC_STAT_COAL_TICKS, val);
9372 /* tp->lock is held. */
9373 static void tg3_tx_rcbs_disable(struct tg3 *tp)
9377 /* Disable all transmit rings but the first. */
9378 if (!tg3_flag(tp, 5705_PLUS))
9379 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9380 else if (tg3_flag(tp, 5717_PLUS))
9381 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9382 else if (tg3_flag(tp, 57765_CLASS) ||
9383 tg3_asic_rev(tp) == ASIC_REV_5762)
9384 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9386 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9388 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9389 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9390 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9391 BDINFO_FLAGS_DISABLED);
9394 /* tp->lock is held. */
9395 static void tg3_tx_rcbs_init(struct tg3 *tp)
9398 u32 txrcb = NIC_SRAM_SEND_RCB;
9400 if (tg3_flag(tp, ENABLE_TSS))
9403 for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9404 struct tg3_napi *tnapi = &tp->napi[i];
9406 if (!tnapi->tx_ring)
9409 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9410 (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9411 NIC_SRAM_TX_BUFFER_DESC);
9415 /* tp->lock is held. */
9416 static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9420 /* Disable all receive return rings but the first. */
9421 if (tg3_flag(tp, 5717_PLUS))
9422 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9423 else if (!tg3_flag(tp, 5705_PLUS))
9424 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9425 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9426 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9427 tg3_flag(tp, 57765_CLASS))
9428 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9430 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9432 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9433 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9434 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9435 BDINFO_FLAGS_DISABLED);
9438 /* tp->lock is held. */
9439 static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9442 u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9444 if (tg3_flag(tp, ENABLE_RSS))
9447 for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9448 struct tg3_napi *tnapi = &tp->napi[i];
9453 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9454 (tp->rx_ret_ring_mask + 1) <<
9455 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9459 /* tp->lock is held. */
9460 static void tg3_rings_reset(struct tg3 *tp)
9464 struct tg3_napi *tnapi = &tp->napi[0];
9466 tg3_tx_rcbs_disable(tp);
9468 tg3_rx_ret_rcbs_disable(tp);
9470 /* Disable interrupts */
9471 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9472 tp->napi[0].chk_msi_cnt = 0;
9473 tp->napi[0].last_rx_cons = 0;
9474 tp->napi[0].last_tx_cons = 0;
9476 /* Zero mailbox registers. */
9477 if (tg3_flag(tp, SUPPORT_MSIX)) {
9478 for (i = 1; i < tp->irq_max; i++) {
9479 tp->napi[i].tx_prod = 0;
9480 tp->napi[i].tx_cons = 0;
9481 if (tg3_flag(tp, ENABLE_TSS))
9482 tw32_mailbox(tp->napi[i].prodmbox, 0);
9483 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9484 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9485 tp->napi[i].chk_msi_cnt = 0;
9486 tp->napi[i].last_rx_cons = 0;
9487 tp->napi[i].last_tx_cons = 0;
9489 if (!tg3_flag(tp, ENABLE_TSS))
9490 tw32_mailbox(tp->napi[0].prodmbox, 0);
9492 tp->napi[0].tx_prod = 0;
9493 tp->napi[0].tx_cons = 0;
9494 tw32_mailbox(tp->napi[0].prodmbox, 0);
9495 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9498 /* Make sure the NIC-based send BD rings are disabled. */
9499 if (!tg3_flag(tp, 5705_PLUS)) {
9500 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9501 for (i = 0; i < 16; i++)
9502 tw32_tx_mbox(mbox + i * 8, 0);
9505 /* Clear status block in ram. */
9506 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9508 /* Set status block DMA address */
9509 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9510 ((u64) tnapi->status_mapping >> 32));
9511 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9512 ((u64) tnapi->status_mapping & 0xffffffff));
9514 stblk = HOSTCC_STATBLCK_RING1;
9516 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9517 u64 mapping = (u64)tnapi->status_mapping;
9518 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9519 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9522 /* Clear status block in ram. */
9523 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9526 tg3_tx_rcbs_init(tp);
9527 tg3_rx_ret_rcbs_init(tp);
9530 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9532 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9534 if (!tg3_flag(tp, 5750_PLUS) ||
9535 tg3_flag(tp, 5780_CLASS) ||
9536 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9537 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9538 tg3_flag(tp, 57765_PLUS))
9539 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9540 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9541 tg3_asic_rev(tp) == ASIC_REV_5787)
9542 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9544 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9546 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9547 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9549 val = min(nic_rep_thresh, host_rep_thresh);
9550 tw32(RCVBDI_STD_THRESH, val);
9552 if (tg3_flag(tp, 57765_PLUS))
9553 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9555 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9558 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9560 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9562 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9563 tw32(RCVBDI_JUMBO_THRESH, val);
9565 if (tg3_flag(tp, 57765_PLUS))
9566 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
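/* A rough worked example of the replenish math above, assuming a
 * hypothetical rx_pending of 200: host_rep_thresh = max(200 / 8, 1) = 25,
 * so the host refills the standard ring once 25 buffers have been
 * consumed, unless half the NIC's BD cache (nic_rep_thresh) is smaller
 * still.
 */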
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];
		for (k = 0; k < 8; k++)
			reg = (reg >> 1) ^ ((reg & 1) ? 0xedb88320 : 0);
	}

	/* Bit-reflected CRC-32, as used for the Ethernet multicast hash. */
	return ~reg;
}
9593 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9595 /* accept or reject all multicast frames */
9596 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9597 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9598 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9599 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9602 static void __tg3_set_rx_mode(struct net_device *dev)
9604 struct tg3 *tp = netdev_priv(dev);
9607 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9608 RX_MODE_KEEP_VLAN_TAG);
9610 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
	 * flag clear.
	 */
9614 if (!tg3_flag(tp, ENABLE_ASF))
9615 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9618 if (dev->flags & IFF_PROMISC) {
9619 /* Promiscuous mode. */
9620 rx_mode |= RX_MODE_PROMISC;
9621 } else if (dev->flags & IFF_ALLMULTI) {
9622 /* Accept all multicast. */
9623 tg3_set_multi(tp, 1);
9624 } else if (netdev_mc_empty(dev)) {
9625 /* Reject all multicast. */
9626 tg3_set_multi(tp, 0);
9628 /* Accept one or more multicast(s). */
9629 struct netdev_hw_addr *ha;
		u32 mc_filter[4] = { 0, };
		u32 regidx, bit, crc;

		netdev_for_each_mc_addr(ha, dev) {
			crc = calc_crc(ha->addr, ETH_ALEN);
			bit = ~crc & 0x7f;
			regidx = (bit & 0x60) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}
9643 tw32(MAC_HASH_REG_0, mc_filter[0]);
9644 tw32(MAC_HASH_REG_1, mc_filter[1]);
9645 tw32(MAC_HASH_REG_2, mc_filter[2]);
9646 tw32(MAC_HASH_REG_3, mc_filter[3]);
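		/* Illustrative note, not driver code: each multicast address
		 * is reduced to a 7-bit hash by inverting the low bits of its
		 * CRC-32.  Bits 6:5 of the hash pick one of the four 32-bit
		 * MAC_HASH registers and bits 4:0 the bit within it, so the
		 * four registers form a single 128-bit imperfect filter:
		 *
		 *	hash   = ~calc_crc(addr, ETH_ALEN) & 0x7f;
		 *	regidx = hash >> 5;	// which MAC_HASH_REG_x
		 *	bitpos = hash & 0x1f;	// which bit within it
		 */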
9649 if (rx_mode != tp->rx_mode) {
9650 tp->rx_mode = rx_mode;
9651 tw32_f(MAC_RX_MODE, rx_mode);
9656 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9660 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9661 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
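/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the default
 * table spreads flows round-robin across the rx queues, e.g. for
 * qcnt = 4 the entries read 0, 1, 2, 3, 0, 1, 2, 3, ...
 */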
9664 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9668 if (!tg3_flag(tp, SUPPORT_MSIX))
9671 if (tp->rxq_cnt == 1) {
9672 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9676 /* Validate table against current IRQ count */
9677 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9678 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9682 if (i != TG3_RSS_INDIR_TBL_SIZE)
9683 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9686 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9689 u32 reg = MAC_RSS_INDIR_TBL_0;
9691 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9692 u32 val = tp->rss_ind_tbl[i];
9694 for (; i % 8; i++) {
9696 val |= tp->rss_ind_tbl[i];
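	/* In the full driver the inner loop shifts val left four bits
	 * before OR-ing in each entry, so every 32-bit
	 * MAC_RSS_INDIR_TBL_x register packs eight 4-bit table entries
	 * and the 128-entry table occupies 16 consecutive registers.
	 */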
9703 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9705 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9706 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9708 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9711 /* tp->lock is held. */
9712 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9714 u32 val, rdmac_mode;
9716 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9718 tg3_disable_ints(tp);
9722 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9724 if (tg3_flag(tp, INIT_COMPLETE))
9725 tg3_abort_hw(tp, 1);
9727 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9728 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9729 tg3_phy_pull_config(tp);
9730 tg3_eee_pull_config(tp, NULL);
9731 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
	/* Enable MAC control of LPI */
	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
		tg3_setup_eee(tp);
9741 err = tg3_chip_reset(tp);
9745 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9747 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9748 val = tr32(TG3_CPMU_CTRL);
9749 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9750 tw32(TG3_CPMU_CTRL, val);
9752 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9753 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9754 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9755 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9757 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9758 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9759 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9760 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9762 val = tr32(TG3_CPMU_HST_ACC);
9763 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9764 val |= CPMU_HST_ACC_MACCLK_6_25;
9765 tw32(TG3_CPMU_HST_ACC, val);
9768 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9769 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9770 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9771 PCIE_PWR_MGMT_L1_THRESH_4MS;
9772 tw32(PCIE_PWR_MGMT_THRESH, val);
9774 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9775 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9777 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9779 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9780 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9783 if (tg3_flag(tp, L1PLLPD_EN)) {
9784 u32 grc_mode = tr32(GRC_MODE);
9786 /* Access the lower 1K of PL PCIE block registers. */
9787 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9788 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9790 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9791 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9792 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9794 tw32(GRC_MODE, grc_mode);
9797 if (tg3_flag(tp, 57765_CLASS)) {
9798 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9799 u32 grc_mode = tr32(GRC_MODE);
9801 /* Access the lower 1K of PL PCIE block registers. */
9802 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9803 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9805 val = tr32(TG3_PCIE_TLDLPL_PORT +
9806 TG3_PCIE_PL_LO_PHYCTL5);
9807 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9808 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9810 tw32(GRC_MODE, grc_mode);
9813 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9816 /* Fix transmit hangs */
9817 val = tr32(TG3_CPMU_PADRNG_CTL);
9818 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9819 tw32(TG3_CPMU_PADRNG_CTL, val);
9821 grc_mode = tr32(GRC_MODE);
9823 /* Access the lower 1K of DL PCIE block registers. */
9824 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9825 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9827 val = tr32(TG3_PCIE_TLDLPL_PORT +
9828 TG3_PCIE_DL_LO_FTSMAX);
9829 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9830 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9831 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9833 tw32(GRC_MODE, grc_mode);
9836 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9837 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9838 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9839 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9842 /* This works around an issue with Athlon chipsets on
9843 * B3 tigon3 silicon. This bit has no effect on any
9844 * other revision. But do not set this on PCI Express
9845 * chips and don't even touch the clocks if the CPMU is present.
9847 if (!tg3_flag(tp, CPMU_PRESENT)) {
9848 if (!tg3_flag(tp, PCI_EXPRESS))
9849 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9850 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9853 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9854 tg3_flag(tp, PCIX_MODE)) {
9855 val = tr32(TG3PCI_PCISTATE);
9856 val |= PCISTATE_RETRY_SAME_DMA;
9857 tw32(TG3PCI_PCISTATE, val);
9860 if (tg3_flag(tp, ENABLE_APE)) {
9861 /* Allow reads and writes to the
9862 * APE register and memory space.
9864 val = tr32(TG3PCI_PCISTATE);
9865 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9866 PCISTATE_ALLOW_APE_SHMEM_WR |
9867 PCISTATE_ALLOW_APE_PSPACE_WR;
9868 tw32(TG3PCI_PCISTATE, val);
9871 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9872 /* Enable some hw fixes. */
9873 val = tr32(TG3PCI_MSI_DATA);
9874 val |= (1 << 26) | (1 << 28) | (1 << 29);
9875 tw32(TG3PCI_MSI_DATA, val);
	/* Descriptor ring init may make accesses to the
	 * NIC SRAM area to set up the TX descriptors, so we
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
9883 err = tg3_init_rings(tp);
9887 if (tg3_flag(tp, 57765_PLUS)) {
9888 val = tr32(TG3PCI_DMA_RW_CTRL) &
9889 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9890 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9891 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9892 if (!tg3_flag(tp, 57765_CLASS) &&
9893 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9894 tg3_asic_rev(tp) != ASIC_REV_5762)
9895 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9896 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9897 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9898 tg3_asic_rev(tp) != ASIC_REV_5761) {
9899 /* This value is determined during the probe time DMA
9900 * engine test, tg3_test_dma.
9902 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9905 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9906 GRC_MODE_4X_NIC_SEND_RINGS |
9907 GRC_MODE_NO_TX_PHDR_CSUM |
9908 GRC_MODE_NO_RX_PHDR_CSUM);
9909 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
	/* Pseudo-header checksum is done by hardware logic and not
	 * the offload processors, so make the chip do the pseudo-
	 * header checksums on receive.  For transmit it is more
	 * convenient to do the pseudo-header checksum in software
	 * as Linux does that on transmit for us in all cases.
	 */
9917 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9919 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9921 tw32(TG3_RX_PTP_CTL,
9922 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9924 if (tg3_flag(tp, PTP_CAPABLE))
9925 val |= GRC_MODE_TIME_SYNC_ENABLE;
9927 tw32(GRC_MODE, tp->grc_mode | val);
	/* Set up the timer prescaler register.  The clock is always 66 MHz;
	 * the value 65 below divides it by 66 for a 1 MHz timer tick.
	 */
9930 val = tr32(GRC_MISC_CFG);
9932 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9933 tw32(GRC_MISC_CFG, val);
9935 /* Initialize MBUF/DESC pool. */
9936 if (tg3_flag(tp, 5750_PLUS)) {
9938 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9939 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9940 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9941 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9943 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9944 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9945 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9946 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9949 fw_len = tp->fw_len;
9950 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9951 tw32(BUFMGR_MB_POOL_ADDR,
9952 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9953 tw32(BUFMGR_MB_POOL_SIZE,
9954 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9957 if (tp->dev->mtu <= ETH_DATA_LEN) {
9958 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9959 tp->bufmgr_config.mbuf_read_dma_low_water);
9960 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9961 tp->bufmgr_config.mbuf_mac_rx_low_water);
9962 tw32(BUFMGR_MB_HIGH_WATER,
9963 tp->bufmgr_config.mbuf_high_water);
9965 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9966 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9967 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9968 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9969 tw32(BUFMGR_MB_HIGH_WATER,
9970 tp->bufmgr_config.mbuf_high_water_jumbo);
9972 tw32(BUFMGR_DMA_LOW_WATER,
9973 tp->bufmgr_config.dma_low_water);
9974 tw32(BUFMGR_DMA_HIGH_WATER,
9975 tp->bufmgr_config.dma_high_water);
9977 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9978 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9979 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9980 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9981 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9982 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9983 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9984 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9985 tw32(BUFMGR_MODE, val);
9986 for (i = 0; i < 2000; i++) {
9987 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9992 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9996 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9997 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9999 tg3_setup_rxbd_thresholds(tp);
10001 /* Initialize TG3_BDINFO's at:
10002 * RCVDBDI_STD_BD: standard eth size rx ring
10003 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
10004 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
10007 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
10008 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
10009 * ring attribute flags
10010 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
10012 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10013 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
	 * The size of each ring is fixed in the firmware, but the location is
	 * configurable.
	 */
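	/* For illustration: the maxlen/flags word is built as
	 * (maxlen << BDINFO_FLAGS_MAXLEN_SHIFT) | flags, so a 512-entry
	 * ring with no flags set is written as 512 << 16 = 0x02000000,
	 * while a disabled ring is just BDINFO_FLAGS_DISABLED.
	 */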
10018 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10019 ((u64) tpr->rx_std_mapping >> 32));
10020 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10021 ((u64) tpr->rx_std_mapping & 0xffffffff));
10022 if (!tg3_flag(tp, 5717_PLUS))
10023 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10024 NIC_SRAM_RX_BUFFER_DESC);
10026 /* Disable the mini ring */
10027 if (!tg3_flag(tp, 5705_PLUS))
10028 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10029 BDINFO_FLAGS_DISABLED);
10031 /* Program the jumbo buffer descriptor ring control
10032 * blocks on those devices that have them.
10034 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10035 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10037 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10038 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10039 ((u64) tpr->rx_jmb_mapping >> 32));
10040 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10041 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10042 val = TG3_RX_JMB_RING_SIZE(tp) <<
10043 BDINFO_FLAGS_MAXLEN_SHIFT;
10044 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10045 val | BDINFO_FLAGS_USE_EXT_RECV);
10046 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10047 tg3_flag(tp, 57765_CLASS) ||
10048 tg3_asic_rev(tp) == ASIC_REV_5762)
10049 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10050 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10052 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10053 BDINFO_FLAGS_DISABLED);
10056 if (tg3_flag(tp, 57765_PLUS)) {
10057 val = TG3_RX_STD_RING_SIZE(tp);
10058 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10059 val |= (TG3_RX_STD_DMA_SZ << 2);
10061 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10063 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10065 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10067 tpr->rx_std_prod_idx = tp->rx_pending;
10068 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10070 tpr->rx_jmb_prod_idx =
10071 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10072 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10074 tg3_rings_reset(tp);
10076 /* Initialize MAC address and backoff seed. */
10077 __tg3_set_mac_addr(tp, false);
10079 /* MTU + ethernet header + FCS + optional VLAN tag */
10080 tw32(MAC_RX_MTU_SIZE,
10081 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10083 /* The slot time is changed by tg3_setup_phy if we
10084 * run at gigabit with half duplex.
10086 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10087 (6 << TX_LENGTHS_IPG_SHIFT) |
10088 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10090 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10091 tg3_asic_rev(tp) == ASIC_REV_5762)
10092 val |= tr32(MAC_TX_LENGTHS) &
10093 (TX_LENGTHS_JMB_FRM_LEN_MSK |
10094 TX_LENGTHS_CNT_DWN_VAL_MSK);
10096 tw32(MAC_TX_LENGTHS, val);
10098 /* Receive rules. */
10099 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10100 tw32(RCVLPC_CONFIG, 0x0181);
	/* Calculate the RDMAC_MODE setting early; we need it to determine
	 * the RCVLPC_STATE_ENABLE mask.
	 */
10105 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10106 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10107 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10108 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10109 RDMAC_MODE_LNGREAD_ENAB);
10111 if (tg3_asic_rev(tp) == ASIC_REV_5717)
10112 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10114 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10115 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10116 tg3_asic_rev(tp) == ASIC_REV_57780)
10117 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10118 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10119 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10121 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10122 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10123 if (tg3_flag(tp, TSO_CAPABLE) &&
10124 tg3_asic_rev(tp) == ASIC_REV_5705) {
10125 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10126 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10127 !tg3_flag(tp, IS_5788)) {
10128 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10132 if (tg3_flag(tp, PCI_EXPRESS))
10133 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10135 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10137 if (tp->dev->mtu <= ETH_DATA_LEN) {
10138 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10139 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10143 if (tg3_flag(tp, HW_TSO_1) ||
10144 tg3_flag(tp, HW_TSO_2) ||
10145 tg3_flag(tp, HW_TSO_3))
10146 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10148 if (tg3_flag(tp, 57765_PLUS) ||
10149 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10150 tg3_asic_rev(tp) == ASIC_REV_57780)
10151 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10153 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10154 tg3_asic_rev(tp) == ASIC_REV_5762)
10155 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10157 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10158 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10159 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10160 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10161 tg3_flag(tp, 57765_PLUS)) {
10164 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10165 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10167 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10169 val = tr32(tgtreg);
10170 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10171 tg3_asic_rev(tp) == ASIC_REV_5762) {
10172 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10173 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10174 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10175 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10176 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10177 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10179 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10182 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10183 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10184 tg3_asic_rev(tp) == ASIC_REV_5762) {
10187 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10188 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10190 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10192 val = tr32(tgtreg);
10194 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10195 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10198 /* Receive/send statistics. */
10199 if (tg3_flag(tp, 5750_PLUS)) {
10200 val = tr32(RCVLPC_STATS_ENABLE);
10201 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10202 tw32(RCVLPC_STATS_ENABLE, val);
10203 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10204 tg3_flag(tp, TSO_CAPABLE)) {
10205 val = tr32(RCVLPC_STATS_ENABLE);
10206 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10207 tw32(RCVLPC_STATS_ENABLE, val);
10209 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10211 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10212 tw32(SNDDATAI_STATSENAB, 0xffffff);
10213 tw32(SNDDATAI_STATSCTRL,
10214 (SNDDATAI_SCTRL_ENABLE |
10215 SNDDATAI_SCTRL_FASTUPD));
10217 /* Setup host coalescing engine. */
10218 tw32(HOSTCC_MODE, 0);
10219 for (i = 0; i < 2000; i++) {
10220 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10225 __tg3_set_coalesce(tp, &tp->coal);
10227 if (!tg3_flag(tp, 5705_PLUS)) {
10228 /* Status/statistics block address. See tg3_timer,
10229 * the tg3_periodic_fetch_stats call there, and
10230 * tg3_get_stats to see how this works for 5705/5750 chips.
10232 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10233 ((u64) tp->stats_mapping >> 32));
10234 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10235 ((u64) tp->stats_mapping & 0xffffffff));
10236 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10238 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10240 /* Clear statistics and status block memory areas */
10241 for (i = NIC_SRAM_STATS_BLK;
10242 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10243 i += sizeof(u32)) {
10244 tg3_write_mem(tp, i, 0);
10249 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10251 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10252 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10253 if (!tg3_flag(tp, 5705_PLUS))
10254 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10256 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10257 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10258 /* reset to prevent losing 1st rx packet intermittently */
10259 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10263 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10264 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10265 MAC_MODE_FHDE_ENABLE;
10266 if (tg3_flag(tp, ENABLE_APE))
10267 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10268 if (!tg3_flag(tp, 5705_PLUS) &&
10269 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10270 tg3_asic_rev(tp) != ASIC_REV_5700)
10271 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10272 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10275 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10276 * If TG3_FLAG_IS_NIC is zero, we should read the
10277 * register to preserve the GPIO settings for LOMs. The GPIOs,
	 * whether used as inputs or outputs, are set by boot code after
	 * reset.
	 */
10281 if (!tg3_flag(tp, IS_NIC)) {
10284 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10285 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10286 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10288 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10289 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10290 GRC_LCLCTRL_GPIO_OUTPUT3;
10292 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10293 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10295 tp->grc_local_ctrl &= ~gpio_mask;
10296 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10298 /* GPIO1 must be driven high for eeprom write protect */
10299 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10300 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10301 GRC_LCLCTRL_GPIO_OUTPUT1);
10303 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10306 if (tg3_flag(tp, USING_MSIX)) {
10307 val = tr32(MSGINT_MODE);
10308 val |= MSGINT_MODE_ENABLE;
10309 if (tp->irq_cnt > 1)
10310 val |= MSGINT_MODE_MULTIVEC_EN;
10311 if (!tg3_flag(tp, 1SHOT_MSI))
10312 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10313 tw32(MSGINT_MODE, val);
10316 if (!tg3_flag(tp, 5705_PLUS)) {
10317 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10321 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10322 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10323 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10324 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10325 WDMAC_MODE_LNGREAD_ENAB);
10327 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10328 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10329 if (tg3_flag(tp, TSO_CAPABLE) &&
10330 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10331 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10333 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10334 !tg3_flag(tp, IS_5788)) {
10335 val |= WDMAC_MODE_RX_ACCEL;
10339 /* Enable host coalescing bug fix */
10340 if (tg3_flag(tp, 5755_PLUS))
10341 val |= WDMAC_MODE_STATUS_TAG_FIX;
10343 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10344 val |= WDMAC_MODE_BURST_ALL_DATA;
10346 tw32_f(WDMAC_MODE, val);
10349 if (tg3_flag(tp, PCIX_MODE)) {
10352 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10354 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10355 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10356 pcix_cmd |= PCI_X_CMD_READ_2K;
10357 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10358 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10359 pcix_cmd |= PCI_X_CMD_READ_2K;
10361 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10365 tw32_f(RDMAC_MODE, rdmac_mode);
10368 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10369 tg3_asic_rev(tp) == ASIC_REV_5720) {
10370 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10371 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10374 if (i < TG3_NUM_RDMA_CHANNELS) {
10375 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10376 val |= tg3_lso_rd_dma_workaround_bit(tp);
10377 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10378 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10382 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10383 if (!tg3_flag(tp, 5705_PLUS))
10384 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10386 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10387 tw32(SNDDATAC_MODE,
10388 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10390 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10392 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10393 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10394 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10395 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10396 val |= RCVDBDI_MODE_LRG_RING_SZ;
10397 tw32(RCVDBDI_MODE, val);
10398 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10399 if (tg3_flag(tp, HW_TSO_1) ||
10400 tg3_flag(tp, HW_TSO_2) ||
10401 tg3_flag(tp, HW_TSO_3))
10402 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10403 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10404 if (tg3_flag(tp, ENABLE_TSS))
10405 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10406 tw32(SNDBDI_MODE, val);
10407 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10409 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10410 err = tg3_load_5701_a0_firmware_fix(tp);
10415 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10416 /* Ignore any errors for the firmware download. If download
10417 * fails, the device will operate with EEE disabled
10419 tg3_load_57766_firmware(tp);
10422 if (tg3_flag(tp, TSO_CAPABLE)) {
10423 err = tg3_load_tso_firmware(tp);
10428 tp->tx_mode = TX_MODE_ENABLE;
10430 if (tg3_flag(tp, 5755_PLUS) ||
10431 tg3_asic_rev(tp) == ASIC_REV_5906)
10432 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10434 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10435 tg3_asic_rev(tp) == ASIC_REV_5762) {
10436 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10437 tp->tx_mode &= ~val;
10438 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10441 tw32_f(MAC_TX_MODE, tp->tx_mode);
10444 if (tg3_flag(tp, ENABLE_RSS)) {
10445 tg3_rss_write_indir_tbl(tp);
10447 /* Setup the "secret" hash key. */
10448 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10449 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10450 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10451 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10452 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10453 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10454 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10455 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10456 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10457 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10460 tp->rx_mode = RX_MODE_ENABLE;
10461 if (tg3_flag(tp, 5755_PLUS))
10462 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10464 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10465 tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10467 if (tg3_flag(tp, ENABLE_RSS))
10468 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10469 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10470 RX_MODE_RSS_IPV6_HASH_EN |
10471 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10472 RX_MODE_RSS_IPV4_HASH_EN |
10473 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10475 tw32_f(MAC_RX_MODE, tp->rx_mode);
10478 tw32(MAC_LED_CTRL, tp->led_ctrl);
10480 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10481 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10482 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10485 tw32_f(MAC_RX_MODE, tp->rx_mode);
10488 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10489 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10490 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10491 /* Set drive transmission level to 1.2V */
10492 /* only if the signal pre-emphasis bit is not set */
10493 val = tr32(MAC_SERDES_CFG);
10496 tw32(MAC_SERDES_CFG, val);
10498 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10499 tw32(MAC_SERDES_CFG, 0x616000);
	/* Prevent chip from dropping frames when flow control
	 * is enabled.
	 */
	if (tg3_flag(tp, 57765_CLASS))
		val = 1;
	else
		val = 2;
	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10511 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10512 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10513 /* Use hardware link auto-negotiation */
10514 tg3_flag_set(tp, HW_AUTONEG);
10517 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10518 tg3_asic_rev(tp) == ASIC_REV_5714) {
10521 tmp = tr32(SERDES_RX_CTRL);
10522 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10523 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10524 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10525 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10528 if (!tg3_flag(tp, USE_PHYLIB)) {
10529 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10530 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10532 err = tg3_setup_phy(tp, false);
10536 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10537 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10540 /* Clear CRC stats. */
10541 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10542 tg3_writephy(tp, MII_TG3_TEST1,
10543 tmp | MII_TG3_TEST1_CRC_EN);
10544 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10549 __tg3_set_rx_mode(tp->dev);
10551 /* Initialize receive rules. */
10552 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10553 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10554 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10555 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10557 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10561 if (tg3_flag(tp, ENABLE_ASF))
10565 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10567 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10569 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10571 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10573 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10575 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10577 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10579 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10581 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10583 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10585 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10587 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10589 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10591 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10599 if (tg3_flag(tp, ENABLE_APE))
10600 /* Write our heartbeat update interval to APE. */
10601 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10602 APE_HOST_HEARTBEAT_INT_DISABLE);
10604 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10609 /* Called at device open time to get the chip ready for
10610 * packet processing. Invoked with tp->lock held.
10612 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10614 /* Chip may have been just powered on. If so, the boot code may still
10615 * be running initialization. Wait for it to finish to avoid races in
10616 * accessing the hardware.
10618 tg3_enable_register_access(tp);
10621 tg3_switch_clocks(tp);
10623 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10625 return tg3_reset_hw(tp, reset_phy);
10628 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10632 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10633 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10635 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10638 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10639 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10640 memset(ocir, 0, TG3_OCIR_LEN);
10644 /* sysfs attributes for hwmon */
10645 static ssize_t tg3_show_temp(struct device *dev,
10646 struct device_attribute *devattr, char *buf)
10648 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10649 struct tg3 *tp = dev_get_drvdata(dev);
10652 spin_lock_bh(&tp->lock);
10653 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10654 sizeof(temperature));
10655 spin_unlock_bh(&tp->lock);
	/* The hwmon ABI expects millidegrees Celsius. */
	return sprintf(buf, "%u\n", temperature * 1000);
10660 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10661 TG3_TEMP_SENSOR_OFFSET);
10662 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10663 TG3_TEMP_CAUTION_OFFSET);
10664 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10665 TG3_TEMP_MAX_OFFSET);
10667 static struct attribute *tg3_attrs[] = {
10668 &sensor_dev_attr_temp1_input.dev_attr.attr,
10669 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10670 &sensor_dev_attr_temp1_max.dev_attr.attr,
10673 ATTRIBUTE_GROUPS(tg3);
10675 static void tg3_hwmon_close(struct tg3 *tp)
10677 if (tp->hwmon_dev) {
10678 hwmon_device_unregister(tp->hwmon_dev);
10679 tp->hwmon_dev = NULL;
10683 static void tg3_hwmon_open(struct tg3 *tp)
10687 struct pci_dev *pdev = tp->pdev;
10688 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10690 tg3_sd_scan_scratchpad(tp, ocirs);
10692 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10693 if (!ocirs[i].src_data_length)
10696 size += ocirs[i].src_hdr_length;
10697 size += ocirs[i].src_data_length;
10703 tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10705 if (IS_ERR(tp->hwmon_dev)) {
10706 tp->hwmon_dev = NULL;
10707 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
#define TG3_STAT_ADD32(PSTAT, REG) \
do {	u32 __val = tr32(REG); \
	(PSTAT)->low += __val; \
	if ((PSTAT)->low < __val) \
		(PSTAT)->high += 1; \
} while (0)
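/* The carry test above relies on unsigned wraparound: after low += __val,
 * the sum is smaller than __val exactly when the 32-bit addition
 * overflowed.  E.g. low = 0xfffffff0 plus __val = 0x20 leaves low = 0x10,
 * which is < 0x20, so high is bumped; this extends the 32-bit hardware
 * counters to 64 bits in software.
 */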
10719 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10721 struct tg3_hw_stats *sp = tp->hw_stats;
10726 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10727 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10728 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10729 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10730 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10731 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10732 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10733 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10734 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10735 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10736 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10737 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10738 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10739 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10740 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10741 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10744 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10745 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10746 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10747 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10750 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10751 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10752 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10753 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10754 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10755 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10756 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10757 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10758 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10759 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10760 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10761 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10762 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10763 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10765 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10766 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10767 tg3_asic_rev(tp) != ASIC_REV_5762 &&
10768 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10769 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10770 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10772 u32 val = tr32(HOSTCC_FLOW_ATTN);
10773 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10775 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10776 sp->rx_discards.low += val;
10777 if (sp->rx_discards.low < val)
10778 sp->rx_discards.high += 1;
10780 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10782 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10785 static void tg3_chk_missed_msi(struct tg3 *tp)
10789 for (i = 0; i < tp->irq_cnt; i++) {
10790 struct tg3_napi *tnapi = &tp->napi[i];
10792 if (tg3_has_work(tnapi)) {
10793 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10794 tnapi->last_tx_cons == tnapi->tx_cons) {
10795 if (tnapi->chk_msi_cnt < 1) {
10796 tnapi->chk_msi_cnt++;
10802 tnapi->chk_msi_cnt = 0;
10803 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10804 tnapi->last_tx_cons = tnapi->tx_cons;
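/* In outline: if a ring reports pending work but its consumer indices
 * have not moved since the previous poll, the interrupt is presumed
 * lost.  One grace period is allowed via chk_msi_cnt; after that the
 * full driver invokes the interrupt handler by hand to restart NAPI
 * processing.
 */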
10808 static void tg3_timer(unsigned long __opaque)
10810 struct tg3 *tp = (struct tg3 *) __opaque;
10812 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10813 goto restart_timer;
10815 spin_lock(&tp->lock);
10817 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10818 tg3_flag(tp, 57765_CLASS))
10819 tg3_chk_missed_msi(tp);
10821 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10822 /* BCM4785: Flush posted writes from GbE to host memory. */
10826 if (!tg3_flag(tp, TAGGED_STATUS)) {
		/* All of this garbage is because, when using non-tagged
		 * IRQ status, the mailbox/status_block protocol the chip
		 * uses with the CPU is race-prone.
		 */
10831 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10832 tw32(GRC_LOCAL_CTRL,
10833 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10835 tw32(HOSTCC_MODE, tp->coalesce_mode |
10836 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10839 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10840 spin_unlock(&tp->lock);
10841 tg3_reset_task_schedule(tp);
10842 goto restart_timer;
10846 /* This part only runs once per second. */
10847 if (!--tp->timer_counter) {
10848 if (tg3_flag(tp, 5705_PLUS))
10849 tg3_periodic_fetch_stats(tp);
10851 if (tp->setlpicnt && !--tp->setlpicnt)
10852 tg3_phy_eee_enable(tp);
10854 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10858 mac_stat = tr32(MAC_STATUS);
10861 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10862 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10864 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10868 tg3_setup_phy(tp, false);
10869 } else if (tg3_flag(tp, POLL_SERDES)) {
10870 u32 mac_stat = tr32(MAC_STATUS);
10871 int need_setup = 0;
10874 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10877 if (!tp->link_up &&
10878 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10879 MAC_STATUS_SIGNAL_DET))) {
10883 if (!tp->serdes_counter) {
10886 ~MAC_MODE_PORT_MODE_MASK));
10888 tw32_f(MAC_MODE, tp->mac_mode);
10891 tg3_setup_phy(tp, false);
10893 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10894 tg3_flag(tp, 5780_CLASS)) {
10895 tg3_serdes_parallel_detect(tp);
10898 tp->timer_counter = tp->timer_multiplier;
10901 /* Heartbeat is only sent once every 2 seconds.
10903 * The heartbeat is to tell the ASF firmware that the host
10904 * driver is still alive. In the event that the OS crashes,
10905 * ASF needs to reset the hardware to free up the FIFO space
10906 * that may be filled with rx packets destined for the host.
10907 * If the FIFO is full, ASF will no longer function properly.
10909 * Unintended resets have been reported on real time kernels
		 * where the timer doesn't run on time.  Netpoll will also have
		 * the same problem.
		 *
10913 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10914 * to check the ring condition when the heartbeat is expiring
		 * before doing the reset.  This will prevent most unintended
		 * resets.
		 */
10918 if (!--tp->asf_counter) {
10919 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10920 tg3_wait_for_event_ack(tp);
10922 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10923 FWCMD_NICDRV_ALIVE3);
10924 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10925 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10926 TG3_FW_UPDATE_TIMEOUT_SEC);
10928 tg3_generate_fw_event(tp);
10930 tp->asf_counter = tp->asf_multiplier;
10933 spin_unlock(&tp->lock);
10936 tp->timer.expires = jiffies + tp->timer_offset;
10937 add_timer(&tp->timer);
10940 static void tg3_timer_init(struct tg3 *tp)
10942 if (tg3_flag(tp, TAGGED_STATUS) &&
10943 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10944 !tg3_flag(tp, 57765_CLASS))
10945 tp->timer_offset = HZ;
10947 tp->timer_offset = HZ / 10;
10949 BUG_ON(tp->timer_offset > HZ);
10951 tp->timer_multiplier = (HZ / tp->timer_offset);
10952 tp->asf_multiplier = (HZ / tp->timer_offset) *
10953 TG3_FW_UPDATE_FREQ_SEC;
10955 init_timer(&tp->timer);
10956 tp->timer.data = (unsigned long) tp;
10957 tp->timer.function = tg3_timer;
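/* Resulting cadence, for example with HZ = 1000: the non-tagged path
 * uses a 0.1s timer_offset, so timer_multiplier = 10 and the "once per
 * second" block in tg3_timer runs every tenth invocation, while
 * asf_counter spans TG3_FW_UPDATE_FREQ_SEC seconds' worth of ticks
 * between heartbeats.
 */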
10960 static void tg3_timer_start(struct tg3 *tp)
10962 tp->asf_counter = tp->asf_multiplier;
10963 tp->timer_counter = tp->timer_multiplier;
10965 tp->timer.expires = jiffies + tp->timer_offset;
10966 add_timer(&tp->timer);
10969 static void tg3_timer_stop(struct tg3 *tp)
10971 del_timer_sync(&tp->timer);
10974 /* Restart hardware after configuration changes, self-test, etc.
10975 * Invoked with tp->lock held.
10977 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10978 __releases(tp->lock)
10979 __acquires(tp->lock)
10983 err = tg3_init_hw(tp, reset_phy);
10985 netdev_err(tp->dev,
10986 "Failed to re-initialize device, aborting\n");
10987 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10988 tg3_full_unlock(tp);
10989 tg3_timer_stop(tp);
10991 tg3_napi_enable(tp);
10992 dev_close(tp->dev);
10993 tg3_full_lock(tp, 0);
10998 static void tg3_reset_task(struct work_struct *work)
11000 struct tg3 *tp = container_of(work, struct tg3, reset_task);
11003 tg3_full_lock(tp, 0);
11005 if (!netif_running(tp->dev)) {
11006 tg3_flag_clear(tp, RESET_TASK_PENDING);
11007 tg3_full_unlock(tp);
11011 tg3_full_unlock(tp);
11015 tg3_netif_stop(tp);
11017 tg3_full_lock(tp, 1);
11019 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11020 tp->write32_tx_mbox = tg3_write32_tx_mbox;
11021 tp->write32_rx_mbox = tg3_write_flush_reg32;
11022 tg3_flag_set(tp, MBOX_WRITE_REORDER);
11023 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11026 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11027 err = tg3_init_hw(tp, true);
11031 tg3_netif_start(tp);
11034 tg3_full_unlock(tp);
11039 tg3_flag_clear(tp, RESET_TASK_PENDING);
11042 static int tg3_request_irq(struct tg3 *tp, int irq_num)
11045 unsigned long flags;
11047 struct tg3_napi *tnapi = &tp->napi[irq_num];
11049 if (tp->irq_cnt == 1)
11050 name = tp->dev->name;
11052 name = &tnapi->irq_lbl[0];
11053 if (tnapi->tx_buffers && tnapi->rx_rcb)
11054 snprintf(name, IFNAMSIZ,
11055 "%s-txrx-%d", tp->dev->name, irq_num);
11056 else if (tnapi->tx_buffers)
11057 snprintf(name, IFNAMSIZ,
11058 "%s-tx-%d", tp->dev->name, irq_num);
11059 else if (tnapi->rx_rcb)
11060 snprintf(name, IFNAMSIZ,
11061 "%s-rx-%d", tp->dev->name, irq_num);
11063 snprintf(name, IFNAMSIZ,
11064 "%s-%d", tp->dev->name, irq_num);
11065 name[IFNAMSIZ-1] = 0;
	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
		fn = tg3_msi;
		if (tg3_flag(tp, 1SHOT_MSI))
			fn = tg3_msi_1shot;
		flags = 0;
	} else {
		fn = tg3_interrupt;
		if (tg3_flag(tp, TAGGED_STATUS))
			fn = tg3_interrupt_tagged;
		flags = IRQF_SHARED;
	}
11080 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11083 static int tg3_test_interrupt(struct tg3 *tp)
11085 struct tg3_napi *tnapi = &tp->napi[0];
11086 struct net_device *dev = tp->dev;
11087 int err, i, intr_ok = 0;
11090 if (!netif_running(dev))
11093 tg3_disable_ints(tp);
11095 free_irq(tnapi->irq_vec, tnapi);
11098 * Turn off MSI one shot mode. Otherwise this test has no
11099 * observable way to know whether the interrupt was delivered.
11101 if (tg3_flag(tp, 57765_PLUS)) {
11102 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11103 tw32(MSGINT_MODE, val);
11106 err = request_irq(tnapi->irq_vec, tg3_test_isr,
11107 IRQF_SHARED, dev->name, tnapi);
11111 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11112 tg3_enable_ints(tp);
11114 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11117 for (i = 0; i < 5; i++) {
11118 u32 int_mbox, misc_host_ctrl;
11120 int_mbox = tr32_mailbox(tnapi->int_mbox);
11121 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11123 if ((int_mbox != 0) ||
11124 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11129 if (tg3_flag(tp, 57765_PLUS) &&
11130 tnapi->hw_status->status_tag != tnapi->last_tag)
11131 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11136 tg3_disable_ints(tp);
11138 free_irq(tnapi->irq_vec, tnapi);
11140 err = tg3_request_irq(tp, 0);
11146 /* Reenable MSI one shot mode. */
11147 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11148 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11149 tw32(MSGINT_MODE, val);
/* Returns 0 if the MSI test succeeds, or if the MSI test fails but INTx
 * mode is successfully restored.
 */
11160 static int tg3_test_msi(struct tg3 *tp)
11165 if (!tg3_flag(tp, USING_MSI))
	/* Turn off SERR reporting in case MSI terminates with Master
	 * Abort.
	 */
11171 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11172 pci_write_config_word(tp->pdev, PCI_COMMAND,
11173 pci_cmd & ~PCI_COMMAND_SERR);
11175 err = tg3_test_interrupt(tp);
11177 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11182 /* other failures */
11186 /* MSI test failed, go back to INTx mode */
11187 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11188 "to INTx mode. Please report this failure to the PCI "
11189 "maintainer and include system chipset information\n");
11191 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11193 pci_disable_msi(tp->pdev);
11195 tg3_flag_clear(tp, USING_MSI);
11196 tp->napi[0].irq_vec = tp->pdev->irq;
11198 err = tg3_request_irq(tp, 0);
11202 /* Need to reset the chip because the MSI cycle may have terminated
11203 * with Master Abort.
11205 tg3_full_lock(tp, 1);
11207 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11208 err = tg3_init_hw(tp, true);
11210 tg3_full_unlock(tp);
11213 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11218 static int tg3_request_firmware(struct tg3 *tp)
11220 const struct tg3_firmware_hdr *fw_hdr;
11222 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11223 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11228 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
	/* Firmware blob starts with version numbers, followed by
	 * start address and _full_ length including BSS sections
	 * (which must be longer than the actual data, of course).
	 */
11235 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11236 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11237 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11238 tp->fw_len, tp->fw_needed);
11239 release_firmware(tp->fw);
11244 /* We no longer need firmware; we have it. */
11245 tp->fw_needed = NULL;
11249 static u32 tg3_irq_count(struct tg3 *tp)
11251 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11254 /* We want as many rx rings enabled as there are cpus.
11255 * In multiqueue MSI-X mode, the first MSI-X vector
11256 * only deals with link interrupts, etc, so we add
11257 * one to the number of vectors we are requesting.
11259 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
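/* Example: on a four-core system with rxq_cnt = 4 and a single TX
 * queue, irq_cnt becomes min(4 + 1, tp->irq_max): four RSS ring
 * vectors plus vector 0, which carries link and other non-ring
 * interrupts.
 */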
11265 static bool tg3_enable_msix(struct tg3 *tp)
11268 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11270 tp->txq_cnt = tp->txq_req;
11271 tp->rxq_cnt = tp->rxq_req;
11273 tp->rxq_cnt = netif_get_num_default_rss_queues();
11274 if (tp->rxq_cnt > tp->rxq_max)
11275 tp->rxq_cnt = tp->rxq_max;
11277 /* Disable multiple TX rings by default. Simple round-robin hardware
11278 * scheduling of the TX rings can cause starvation of rings with
11279 * small packets when other rings have TSO or jumbo packets.
11284 tp->irq_cnt = tg3_irq_count(tp);
11286 for (i = 0; i < tp->irq_max; i++) {
11287 msix_ent[i].entry = i;
11288 msix_ent[i].vector = 0;
11291 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11294 } else if (rc != 0) {
11295 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11297 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11300 tp->rxq_cnt = max(rc - 1, 1);
11302 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
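	/* pci_enable_msix() returns 0 on success, a negative errno on
	 * failure, or, when the full request cannot be satisfied, the
	 * number of vectors that could be allocated; hence the retry
	 * with rc above and the shrinking of rxq_cnt/txq_cnt to match.
	 */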
11305 for (i = 0; i < tp->irq_max; i++)
11306 tp->napi[i].irq_vec = msix_ent[i].vector;
11308 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11309 pci_disable_msix(tp->pdev);
11313 if (tp->irq_cnt == 1)
11316 tg3_flag_set(tp, ENABLE_RSS);
11318 if (tp->txq_cnt > 1)
11319 tg3_flag_set(tp, ENABLE_TSS);
11321 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11326 static void tg3_ints_init(struct tg3 *tp)
11328 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11329 !tg3_flag(tp, TAGGED_STATUS)) {
11330 /* All MSI supporting chips should support tagged
11331 * status. Assert that this is the case.
11333 netdev_warn(tp->dev,
11334 "MSI without TAGGED_STATUS? Not using MSI\n");
11338 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11339 tg3_flag_set(tp, USING_MSIX);
11340 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11341 tg3_flag_set(tp, USING_MSI);
11343 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11344 u32 msi_mode = tr32(MSGINT_MODE);
11345 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11346 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11347 if (!tg3_flag(tp, 1SHOT_MSI))
11348 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11349 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11352 if (!tg3_flag(tp, USING_MSIX)) {
11354 tp->napi[0].irq_vec = tp->pdev->irq;
11357 if (tp->irq_cnt == 1) {
11360 netif_set_real_num_tx_queues(tp->dev, 1);
11361 netif_set_real_num_rx_queues(tp->dev, 1);
11365 static void tg3_ints_fini(struct tg3 *tp)
11367 if (tg3_flag(tp, USING_MSIX))
11368 pci_disable_msix(tp->pdev);
11369 else if (tg3_flag(tp, USING_MSI))
11370 pci_disable_msi(tp->pdev);
11371 tg3_flag_clear(tp, USING_MSI);
11372 tg3_flag_clear(tp, USING_MSIX);
11373 tg3_flag_clear(tp, ENABLE_RSS);
11374 tg3_flag_clear(tp, ENABLE_TSS);
11377 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11380 struct net_device *dev = tp->dev;
	 * Set up interrupts first so we know how
	 * many NAPI resources to allocate.
	 */
11389 tg3_rss_check_indir_tbl(tp);
11391 /* The placement of this call is tied
11392 * to the setup and use of Host TX descriptors.
11394 err = tg3_alloc_consistent(tp);
11396 goto out_ints_fini;
11400 tg3_napi_enable(tp);
11402 for (i = 0; i < tp->irq_cnt; i++) {
11403 struct tg3_napi *tnapi = &tp->napi[i];
11404 err = tg3_request_irq(tp, i);
11406 for (i--; i >= 0; i--) {
11407 tnapi = &tp->napi[i];
11408 free_irq(tnapi->irq_vec, tnapi);
11410 goto out_napi_fini;
11414 tg3_full_lock(tp, 0);
11417 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11419 err = tg3_init_hw(tp, reset_phy);
11421 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11422 tg3_free_rings(tp);
11425 tg3_full_unlock(tp);
11430 if (test_irq && tg3_flag(tp, USING_MSI)) {
11431 err = tg3_test_msi(tp);
11434 tg3_full_lock(tp, 0);
11435 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11436 tg3_free_rings(tp);
11437 tg3_full_unlock(tp);
11439 goto out_napi_fini;
11442 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11443 u32 val = tr32(PCIE_TRANSACTION_CFG);
11445 tw32(PCIE_TRANSACTION_CFG,
11446 val | PCIE_TRANS_CFG_1SHOT_MSI);
11452 tg3_hwmon_open(tp);
11454 tg3_full_lock(tp, 0);
11456 tg3_timer_start(tp);
11457 tg3_flag_set(tp, INIT_COMPLETE);
11458 tg3_enable_ints(tp);
11463 tg3_ptp_resume(tp);
11466 tg3_full_unlock(tp);
11468 netif_tx_start_all_queues(dev);
	 * Reset the loopback feature if it was turned on while the device
	 * was down; make sure it is installed properly now.
	 */
11474 if (dev->features & NETIF_F_LOOPBACK)
11475 tg3_set_loopback(dev, dev->features);
11480 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11481 struct tg3_napi *tnapi = &tp->napi[i];
11482 free_irq(tnapi->irq_vec, tnapi);
11486 tg3_napi_disable(tp);
11488 tg3_free_consistent(tp);
11496 static void tg3_stop(struct tg3 *tp)
11500 tg3_reset_task_cancel(tp);
11501 tg3_netif_stop(tp);
11503 tg3_timer_stop(tp);
11505 tg3_hwmon_close(tp);
11509 tg3_full_lock(tp, 1);
11511 tg3_disable_ints(tp);
11513 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11514 tg3_free_rings(tp);
11515 tg3_flag_clear(tp, INIT_COMPLETE);
11517 tg3_full_unlock(tp);
11519 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11520 struct tg3_napi *tnapi = &tp->napi[i];
11521 free_irq(tnapi->irq_vec, tnapi);
11528 tg3_free_consistent(tp);
11531 static int tg3_open(struct net_device *dev)
11533 struct tg3 *tp = netdev_priv(dev);
11536 if (tp->fw_needed) {
11537 err = tg3_request_firmware(tp);
11538 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11540 netdev_warn(tp->dev, "EEE capability disabled\n");
11541 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11542 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11543 netdev_warn(tp->dev, "EEE capability restored\n");
11544 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11546 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11550 netdev_warn(tp->dev, "TSO capability disabled\n");
11551 tg3_flag_clear(tp, TSO_CAPABLE);
11552 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11553 netdev_notice(tp->dev, "TSO capability restored\n");
11554 tg3_flag_set(tp, TSO_CAPABLE);
11558 tg3_carrier_off(tp);
11560 err = tg3_power_up(tp);
11564 tg3_full_lock(tp, 0);
11566 tg3_disable_ints(tp);
11567 tg3_flag_clear(tp, INIT_COMPLETE);
11569 tg3_full_unlock(tp);
11571 err = tg3_start(tp,
11572 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11575 tg3_frob_aux_power(tp, false);
11576 pci_set_power_state(tp->pdev, PCI_D3hot);
11579 if (tg3_flag(tp, PTP_CAPABLE)) {
11580 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11582 if (IS_ERR(tp->ptp_clock))
11583 tp->ptp_clock = NULL;
11589 static int tg3_close(struct net_device *dev)
11591 struct tg3 *tp = netdev_priv(dev);
11597 /* Clear stats across close / open calls */
11598 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11599 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11601 if (pci_device_is_present(tp->pdev)) {
11602 tg3_power_down_prepare(tp);
11604 tg3_carrier_off(tp);
11609 static inline u64 get_stat64(tg3_stat64_t *val)
11611 return ((u64)val->high << 32) | ((u64)val->low);
11614 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11616 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11618 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11619 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11620 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11623 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11624 tg3_writephy(tp, MII_TG3_TEST1,
11625 val | MII_TG3_TEST1_CRC_EN);
11626 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11630 tp->phy_crc_errors += val;
11632 return tp->phy_crc_errors;
11635 return get_stat64(&hw_stats->rx_fcs_errors);
11638 #define ESTAT_ADD(member) \
11639 estats->member = old_estats->member + \
11640 get_stat64(&hw_stats->member)
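/* ESTAT_ADD() folds the snapshot saved before the last reset
 * (tp->estats_prev) into the live hardware counters, keeping ethtool
 * statistics continuous across chip resets.  Expanded, for example:
 *
 *	estats->rx_octets = old_estats->rx_octets +
 *			    get_stat64(&hw_stats->rx_octets);
 */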
11642 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11644 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11645 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11647 ESTAT_ADD(rx_octets);
11648 ESTAT_ADD(rx_fragments);
11649 ESTAT_ADD(rx_ucast_packets);
11650 ESTAT_ADD(rx_mcast_packets);
11651 ESTAT_ADD(rx_bcast_packets);
11652 ESTAT_ADD(rx_fcs_errors);
11653 ESTAT_ADD(rx_align_errors);
11654 ESTAT_ADD(rx_xon_pause_rcvd);
11655 ESTAT_ADD(rx_xoff_pause_rcvd);
11656 ESTAT_ADD(rx_mac_ctrl_rcvd);
11657 ESTAT_ADD(rx_xoff_entered);
11658 ESTAT_ADD(rx_frame_too_long_errors);
11659 ESTAT_ADD(rx_jabbers);
11660 ESTAT_ADD(rx_undersize_packets);
11661 ESTAT_ADD(rx_in_length_errors);
11662 ESTAT_ADD(rx_out_length_errors);
11663 ESTAT_ADD(rx_64_or_less_octet_packets);
11664 ESTAT_ADD(rx_65_to_127_octet_packets);
11665 ESTAT_ADD(rx_128_to_255_octet_packets);
11666 ESTAT_ADD(rx_256_to_511_octet_packets);
11667 ESTAT_ADD(rx_512_to_1023_octet_packets);
11668 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11669 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11670 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11671 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11672 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11674 ESTAT_ADD(tx_octets);
11675 ESTAT_ADD(tx_collisions);
11676 ESTAT_ADD(tx_xon_sent);
11677 ESTAT_ADD(tx_xoff_sent);
11678 ESTAT_ADD(tx_flow_control);
11679 ESTAT_ADD(tx_mac_errors);
11680 ESTAT_ADD(tx_single_collisions);
11681 ESTAT_ADD(tx_mult_collisions);
11682 ESTAT_ADD(tx_deferred);
11683 ESTAT_ADD(tx_excessive_collisions);
11684 ESTAT_ADD(tx_late_collisions);
11685 ESTAT_ADD(tx_collide_2times);
11686 ESTAT_ADD(tx_collide_3times);
11687 ESTAT_ADD(tx_collide_4times);
11688 ESTAT_ADD(tx_collide_5times);
11689 ESTAT_ADD(tx_collide_6times);
11690 ESTAT_ADD(tx_collide_7times);
11691 ESTAT_ADD(tx_collide_8times);
11692 ESTAT_ADD(tx_collide_9times);
11693 ESTAT_ADD(tx_collide_10times);
11694 ESTAT_ADD(tx_collide_11times);
11695 ESTAT_ADD(tx_collide_12times);
11696 ESTAT_ADD(tx_collide_13times);
11697 ESTAT_ADD(tx_collide_14times);
11698 ESTAT_ADD(tx_collide_15times);
11699 ESTAT_ADD(tx_ucast_packets);
11700 ESTAT_ADD(tx_mcast_packets);
11701 ESTAT_ADD(tx_bcast_packets);
11702 ESTAT_ADD(tx_carrier_sense_errors);
11703 ESTAT_ADD(tx_discards);
11704 ESTAT_ADD(tx_errors);
11706 ESTAT_ADD(dma_writeq_full);
11707 ESTAT_ADD(dma_write_prioq_full);
11708 ESTAT_ADD(rxbds_empty);
11709 ESTAT_ADD(rx_discards);
11710 ESTAT_ADD(rx_errors);
11711 ESTAT_ADD(rx_threshold_hit);
11713 ESTAT_ADD(dma_readq_full);
11714 ESTAT_ADD(dma_read_prioq_full);
11715 ESTAT_ADD(tx_comp_queue_full);
11717 ESTAT_ADD(ring_set_send_prod_index);
11718 ESTAT_ADD(ring_status_update);
11719 ESTAT_ADD(nic_irqs);
11720 ESTAT_ADD(nic_avoided_irqs);
11721 ESTAT_ADD(nic_tx_threshold_hit);
11723 ESTAT_ADD(mbuf_lwm_thresh_hit);
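/* Fill the generic rtnl_link_stats64 structure from the hardware
 * statistics block, combining several hardware counters per
 * aggregate field and adding in the previously saved totals.
 */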
11726 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11728 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11729 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11731 stats->rx_packets = old_stats->rx_packets +
11732 get_stat64(&hw_stats->rx_ucast_packets) +
11733 get_stat64(&hw_stats->rx_mcast_packets) +
11734 get_stat64(&hw_stats->rx_bcast_packets);
11736 stats->tx_packets = old_stats->tx_packets +
11737 get_stat64(&hw_stats->tx_ucast_packets) +
11738 get_stat64(&hw_stats->tx_mcast_packets) +
11739 get_stat64(&hw_stats->tx_bcast_packets);
11741 stats->rx_bytes = old_stats->rx_bytes +
11742 get_stat64(&hw_stats->rx_octets);
11743 stats->tx_bytes = old_stats->tx_bytes +
11744 get_stat64(&hw_stats->tx_octets);
11746 stats->rx_errors = old_stats->rx_errors +
11747 get_stat64(&hw_stats->rx_errors);
11748 stats->tx_errors = old_stats->tx_errors +
11749 get_stat64(&hw_stats->tx_errors) +
11750 get_stat64(&hw_stats->tx_mac_errors) +
11751 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11752 get_stat64(&hw_stats->tx_discards);
11754 stats->multicast = old_stats->multicast +
11755 get_stat64(&hw_stats->rx_mcast_packets);
11756 stats->collisions = old_stats->collisions +
11757 get_stat64(&hw_stats->tx_collisions);
11759 stats->rx_length_errors = old_stats->rx_length_errors +
11760 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11761 get_stat64(&hw_stats->rx_undersize_packets);
11763 stats->rx_frame_errors = old_stats->rx_frame_errors +
11764 get_stat64(&hw_stats->rx_align_errors);
11765 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11766 get_stat64(&hw_stats->tx_discards);
11767 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11768 get_stat64(&hw_stats->tx_carrier_sense_errors);
11770 stats->rx_crc_errors = old_stats->rx_crc_errors +
11771 tg3_calc_crc_errors(tp);
11773 stats->rx_missed_errors = old_stats->rx_missed_errors +
11774 get_stat64(&hw_stats->rx_discards);
11776 stats->rx_dropped = tp->rx_dropped;
11777 stats->tx_dropped = tp->tx_dropped;
11780 static int tg3_get_regs_len(struct net_device *dev)
11782 return TG3_REG_BLK_SIZE;
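/* ethtool register dump: zero the caller's buffer, then copy out the
 * legacy register block under the full lock. The dump is skipped
 * while the device is in a low-power state.
 */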
11785 static void tg3_get_regs(struct net_device *dev,
11786 struct ethtool_regs *regs, void *_p)
11788 struct tg3 *tp = netdev_priv(dev);
11792 memset(_p, 0, TG3_REG_BLK_SIZE);
11794 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11797 tg3_full_lock(tp, 0);
11799 tg3_dump_legacy_regs(tp, (u32 *)_p);
11801 tg3_full_unlock(tp);
11804 static int tg3_get_eeprom_len(struct net_device *dev)
11806 struct tg3 *tp = netdev_priv(dev);
11808 return tp->nvram_size;
11811 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11813 struct tg3 *tp = netdev_priv(dev);
11816 u32 i, offset, len, b_offset, b_count;
11819 if (tg3_flag(tp, NO_NVRAM))
11822 offset = eeprom->offset;
11826 eeprom->magic = TG3_EEPROM_MAGIC;
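/* NVRAM can only be read a 32-bit word at a time, so an unaligned
 * request is served in three pieces: the tail of the word containing
 * the start offset, the aligned words in the middle, and the head of
 * the word containing the end. For example, offset=6 len=9 takes two
 * bytes from the word at 4, the whole word at 8, and three bytes
 * from the word at 12.
 */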
11829 /* adjustments to start on required 4 byte boundary */
11830 b_offset = offset & 3;
11831 b_count = 4 - b_offset;
11832 if (b_count > len) {
11833 /* i.e. offset=1 len=2 */
11836 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11839 memcpy(data, ((char *)&val) + b_offset, b_count);
11842 eeprom->len += b_count;
11845 /* read bytes up to the last 4 byte boundary */
11846 pd = &data[eeprom->len];
11847 for (i = 0; i < (len - (len & 3)); i += 4) {
11848 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11853 memcpy(pd + i, &val, 4);
11858 /* read last bytes not ending on 4 byte boundary */
11859 pd = &data[eeprom->len];
11861 b_offset = offset + len - b_count;
11862 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11865 memcpy(pd, &val, b_count);
11866 eeprom->len += b_count;
11871 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11873 struct tg3 *tp = netdev_priv(dev);
11875 u32 offset, len, b_offset, odd_len;
11879 if (tg3_flag(tp, NO_NVRAM) ||
11880 eeprom->magic != TG3_EEPROM_MAGIC)
11883 offset = eeprom->offset;
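/* NVRAM writes are also word-granular: read the words bordering an
 * unaligned start or end, merge the user data into a word-padded
 * scratch buffer, and write whole words back.
 */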
11886 if ((b_offset = (offset & 3))) {
11887 /* adjustments to start on required 4 byte boundary */
11888 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11899 /* adjustments to end on required 4 byte boundary */
11901 len = (len + 3) & ~3;
11902 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11908 if (b_offset || odd_len) {
11909 buf = kmalloc(len, GFP_KERNEL);
11913 memcpy(buf, &start, 4);
11915 memcpy(buf+len-4, &end, 4);
11916 memcpy(buf + b_offset, data, eeprom->len);
11919 ret = tg3_nvram_write_block(tp, offset, len, buf);
11927 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11929 struct tg3 *tp = netdev_priv(dev);
11931 if (tg3_flag(tp, USE_PHYLIB)) {
11932 struct phy_device *phydev;
11933 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11935 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
11936 return phy_ethtool_gset(phydev, cmd);
11939 cmd->supported = (SUPPORTED_Autoneg);
11941 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11942 cmd->supported |= (SUPPORTED_1000baseT_Half |
11943 SUPPORTED_1000baseT_Full);
11945 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11946 cmd->supported |= (SUPPORTED_100baseT_Half |
11947 SUPPORTED_100baseT_Full |
11948 SUPPORTED_10baseT_Half |
11949 SUPPORTED_10baseT_Full |
11951 cmd->port = PORT_TP;
11953 cmd->supported |= SUPPORTED_FIBRE;
11954 cmd->port = PORT_FIBRE;
11957 cmd->advertising = tp->link_config.advertising;
11958 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11959 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11960 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11961 cmd->advertising |= ADVERTISED_Pause;
11963 cmd->advertising |= ADVERTISED_Pause |
11964 ADVERTISED_Asym_Pause;
11966 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11967 cmd->advertising |= ADVERTISED_Asym_Pause;
11970 if (netif_running(dev) && tp->link_up) {
11971 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11972 cmd->duplex = tp->link_config.active_duplex;
11973 cmd->lp_advertising = tp->link_config.rmt_adv;
11974 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11975 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11976 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11978 cmd->eth_tp_mdix = ETH_TP_MDI;
11981 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11982 cmd->duplex = DUPLEX_UNKNOWN;
11983 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11985 cmd->phy_address = tp->phy_addr;
11986 cmd->transceiver = XCVR_INTERNAL;
11987 cmd->autoneg = tp->link_config.autoneg;
11993 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11995 struct tg3 *tp = netdev_priv(dev);
11996 u32 speed = ethtool_cmd_speed(cmd);
11998 if (tg3_flag(tp, USE_PHYLIB)) {
11999 struct phy_device *phydev;
12000 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12002 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12003 return phy_ethtool_sset(phydev, cmd);
12006 if (cmd->autoneg != AUTONEG_ENABLE &&
12007 cmd->autoneg != AUTONEG_DISABLE)
12010 if (cmd->autoneg == AUTONEG_DISABLE &&
12011 cmd->duplex != DUPLEX_FULL &&
12012 cmd->duplex != DUPLEX_HALF)
12015 if (cmd->autoneg == AUTONEG_ENABLE) {
12016 u32 mask = ADVERTISED_Autoneg |
12018 ADVERTISED_Asym_Pause;
12020 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12021 mask |= ADVERTISED_1000baseT_Half |
12022 ADVERTISED_1000baseT_Full;
12024 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12025 mask |= ADVERTISED_100baseT_Half |
12026 ADVERTISED_100baseT_Full |
12027 ADVERTISED_10baseT_Half |
12028 ADVERTISED_10baseT_Full |
12031 mask |= ADVERTISED_FIBRE;
12033 if (cmd->advertising & ~mask)
12036 mask &= (ADVERTISED_1000baseT_Half |
12037 ADVERTISED_1000baseT_Full |
12038 ADVERTISED_100baseT_Half |
12039 ADVERTISED_100baseT_Full |
12040 ADVERTISED_10baseT_Half |
12041 ADVERTISED_10baseT_Full);
12043 cmd->advertising &= mask;
12045 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12046 if (speed != SPEED_1000)
12049 if (cmd->duplex != DUPLEX_FULL)
12052 if (speed != SPEED_100 &&
12058 tg3_full_lock(tp, 0);
12060 tp->link_config.autoneg = cmd->autoneg;
12061 if (cmd->autoneg == AUTONEG_ENABLE) {
12062 tp->link_config.advertising = (cmd->advertising |
12063 ADVERTISED_Autoneg);
12064 tp->link_config.speed = SPEED_UNKNOWN;
12065 tp->link_config.duplex = DUPLEX_UNKNOWN;
12067 tp->link_config.advertising = 0;
12068 tp->link_config.speed = speed;
12069 tp->link_config.duplex = cmd->duplex;
12072 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12074 tg3_warn_mgmt_link_flap(tp);
12076 if (netif_running(dev))
12077 tg3_setup_phy(tp, true);
12079 tg3_full_unlock(tp);
12084 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12086 struct tg3 *tp = netdev_priv(dev);
12088 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12089 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12090 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12091 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12094 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12096 struct tg3 *tp = netdev_priv(dev);
12098 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12099 wol->supported = WAKE_MAGIC;
12101 wol->supported = 0;
12103 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12104 wol->wolopts = WAKE_MAGIC;
12105 memset(&wol->sopass, 0, sizeof(wol->sopass));
12108 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12110 struct tg3 *tp = netdev_priv(dev);
12111 struct device *dp = &tp->pdev->dev;
12113 if (wol->wolopts & ~WAKE_MAGIC)
12115 if ((wol->wolopts & WAKE_MAGIC) &&
12116 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12119 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12121 if (device_may_wakeup(dp))
12122 tg3_flag_set(tp, WOL_ENABLE);
12124 tg3_flag_clear(tp, WOL_ENABLE);
12129 static u32 tg3_get_msglevel(struct net_device *dev)
12131 struct tg3 *tp = netdev_priv(dev);
12132 return tp->msg_enable;
12135 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12137 struct tg3 *tp = netdev_priv(dev);
12138 tp->msg_enable = value;
12141 static int tg3_nway_reset(struct net_device *dev)
12143 struct tg3 *tp = netdev_priv(dev);
12146 if (!netif_running(dev))
12149 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12152 tg3_warn_mgmt_link_flap(tp);
12154 if (tg3_flag(tp, USE_PHYLIB)) {
12155 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12157 r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12161 spin_lock_bh(&tp->lock);
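/* The first BMCR read below is discarded; reading the register twice
 * likely guards against a stale value on the initial PHY access.
 */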
12163 tg3_readphy(tp, MII_BMCR, &bmcr);
12164 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12165 ((bmcr & BMCR_ANENABLE) ||
12166 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12167 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12171 spin_unlock_bh(&tp->lock);
12177 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12179 struct tg3 *tp = netdev_priv(dev);
12181 ering->rx_max_pending = tp->rx_std_ring_mask;
12182 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12183 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12185 ering->rx_jumbo_max_pending = 0;
12187 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12189 ering->rx_pending = tp->rx_pending;
12190 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12191 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12193 ering->rx_jumbo_pending = 0;
12195 ering->tx_pending = tp->napi[0].tx_pending;
12198 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12200 struct tg3 *tp = netdev_priv(dev);
12201 int i, irq_sync = 0, err = 0;
12203 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12204 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12205 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12206 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12207 (tg3_flag(tp, TSO_BUG) &&
12208 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12211 if (netif_running(dev)) {
12213 tg3_netif_stop(tp);
12217 tg3_full_lock(tp, irq_sync);
12219 tp->rx_pending = ering->rx_pending;
12221 if (tg3_flag(tp, MAX_RXPEND_64) &&
12222 tp->rx_pending > 63)
12223 tp->rx_pending = 63;
12224 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12226 for (i = 0; i < tp->irq_max; i++)
12227 tp->napi[i].tx_pending = ering->tx_pending;
12229 if (netif_running(dev)) {
12230 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12231 err = tg3_restart_hw(tp, false);
12233 tg3_netif_start(tp);
12236 tg3_full_unlock(tp);
12238 if (irq_sync && !err)
12244 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12246 struct tg3 *tp = netdev_priv(dev);
12248 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12250 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12251 epause->rx_pause = 1;
12253 epause->rx_pause = 0;
12255 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12256 epause->tx_pause = 1;
12258 epause->tx_pause = 0;
12261 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12263 struct tg3 *tp = netdev_priv(dev);
12266 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12267 tg3_warn_mgmt_link_flap(tp);
12269 if (tg3_flag(tp, USE_PHYLIB)) {
12271 struct phy_device *phydev;
12273 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12275 if (!(phydev->supported & SUPPORTED_Pause) ||
12276 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12277 (epause->rx_pause != epause->tx_pause)))
12280 tp->link_config.flowctrl = 0;
12281 if (epause->rx_pause) {
12282 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12284 if (epause->tx_pause) {
12285 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12286 newadv = ADVERTISED_Pause;
12288 newadv = ADVERTISED_Pause |
12289 ADVERTISED_Asym_Pause;
12290 } else if (epause->tx_pause) {
12291 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12292 newadv = ADVERTISED_Asym_Pause;
12296 if (epause->autoneg)
12297 tg3_flag_set(tp, PAUSE_AUTONEG);
12299 tg3_flag_clear(tp, PAUSE_AUTONEG);
12301 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12302 u32 oldadv = phydev->advertising &
12303 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12304 if (oldadv != newadv) {
12305 phydev->advertising &=
12306 ~(ADVERTISED_Pause |
12307 ADVERTISED_Asym_Pause);
12308 phydev->advertising |= newadv;
12309 if (phydev->autoneg) {
12311 * Always renegotiate the link to
12312 * inform our link partner of our
12313 * flow control settings, even if the
12314 * flow control is forced. Let
12315 * tg3_adjust_link() do the final
12316 * flow control setup.
12318 return phy_start_aneg(phydev);
12322 if (!epause->autoneg)
12323 tg3_setup_flow_control(tp, 0, 0);
12325 tp->link_config.advertising &=
12326 ~(ADVERTISED_Pause |
12327 ADVERTISED_Asym_Pause);
12328 tp->link_config.advertising |= newadv;
12333 if (netif_running(dev)) {
12334 tg3_netif_stop(tp);
12338 tg3_full_lock(tp, irq_sync);
12340 if (epause->autoneg)
12341 tg3_flag_set(tp, PAUSE_AUTONEG);
12343 tg3_flag_clear(tp, PAUSE_AUTONEG);
12344 if (epause->rx_pause)
12345 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12347 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12348 if (epause->tx_pause)
12349 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12351 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12353 if (netif_running(dev)) {
12354 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12355 err = tg3_restart_hw(tp, false);
12357 tg3_netif_start(tp);
12360 tg3_full_unlock(tp);
12363 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12368 static int tg3_get_sset_count(struct net_device *dev, int sset)
12372 return TG3_NUM_TEST;
12374 return TG3_NUM_STATS;
12376 return -EOPNOTSUPP;
12380 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12381 u32 *rules __always_unused)
12383 struct tg3 *tp = netdev_priv(dev);
12385 if (!tg3_flag(tp, SUPPORT_MSIX))
12386 return -EOPNOTSUPP;
12388 switch (info->cmd) {
12389 case ETHTOOL_GRXRINGS:
12390 if (netif_running(tp->dev))
12391 info->data = tp->rxq_cnt;
12393 info->data = num_online_cpus();
12394 if (info->data > TG3_RSS_MAX_NUM_QS)
12395 info->data = TG3_RSS_MAX_NUM_QS;
12398 /* The first interrupt vector only
12399 * handles link interrupts.
12405 return -EOPNOTSUPP;
12409 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12412 struct tg3 *tp = netdev_priv(dev);
12414 if (tg3_flag(tp, SUPPORT_MSIX))
12415 size = TG3_RSS_INDIR_TBL_SIZE;
12420 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12422 struct tg3 *tp = netdev_priv(dev);
12425 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12426 indir[i] = tp->rss_ind_tbl[i];
12431 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12433 struct tg3 *tp = netdev_priv(dev);
12436 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12437 tp->rss_ind_tbl[i] = indir[i];
12439 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12442 /* It is legal to write the indirection
12443 * table while the device is running.
12445 tg3_full_lock(tp, 0);
12446 tg3_rss_write_indir_tbl(tp);
12447 tg3_full_unlock(tp);
12452 static void tg3_get_channels(struct net_device *dev,
12453 struct ethtool_channels *channel)
12455 struct tg3 *tp = netdev_priv(dev);
12456 u32 deflt_qs = netif_get_num_default_rss_queues();
12458 channel->max_rx = tp->rxq_max;
12459 channel->max_tx = tp->txq_max;
12461 if (netif_running(dev)) {
12462 channel->rx_count = tp->rxq_cnt;
12463 channel->tx_count = tp->txq_cnt;
12466 channel->rx_count = tp->rxq_req;
12468 channel->rx_count = min(deflt_qs, tp->rxq_max);
12471 channel->tx_count = tp->txq_req;
12473 channel->tx_count = min(deflt_qs, tp->txq_max);
12477 static int tg3_set_channels(struct net_device *dev,
12478 struct ethtool_channels *channel)
12480 struct tg3 *tp = netdev_priv(dev);
12482 if (!tg3_flag(tp, SUPPORT_MSIX))
12483 return -EOPNOTSUPP;
12485 if (channel->rx_count > tp->rxq_max ||
12486 channel->tx_count > tp->txq_max)
12489 tp->rxq_req = channel->rx_count;
12490 tp->txq_req = channel->tx_count;
12492 if (!netif_running(dev))
12497 tg3_carrier_off(tp);
12499 tg3_start(tp, true, false, false);
12504 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12506 switch (stringset) {
12508 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12511 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12514 WARN_ON(1); /* we need a WARN() */
12519 static int tg3_set_phys_id(struct net_device *dev,
12520 enum ethtool_phys_id_state state)
12522 struct tg3 *tp = netdev_priv(dev);
12524 if (!netif_running(tp->dev))
12528 case ETHTOOL_ID_ACTIVE:
12529 return 1; /* cycle on/off once per second */
12531 case ETHTOOL_ID_ON:
12532 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12533 LED_CTRL_1000MBPS_ON |
12534 LED_CTRL_100MBPS_ON |
12535 LED_CTRL_10MBPS_ON |
12536 LED_CTRL_TRAFFIC_OVERRIDE |
12537 LED_CTRL_TRAFFIC_BLINK |
12538 LED_CTRL_TRAFFIC_LED);
12541 case ETHTOOL_ID_OFF:
12542 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12543 LED_CTRL_TRAFFIC_OVERRIDE);
12546 case ETHTOOL_ID_INACTIVE:
12547 tw32(MAC_LED_CTRL, tp->led_ctrl);
12554 static void tg3_get_ethtool_stats(struct net_device *dev,
12555 struct ethtool_stats *estats, u64 *tmp_stats)
12557 struct tg3 *tp = netdev_priv(dev);
12560 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12562 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
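/* Locate the VPD data. NVRAM with the standard magic may point at an
 * extended VPD block through the NVM directory, with the fixed VPD
 * offset as the fallback; any other format is read through the PCI
 * VPD capability instead.
 */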
12565 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12569 u32 offset = 0, len = 0;
12572 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12575 if (magic == TG3_EEPROM_MAGIC) {
12576 for (offset = TG3_NVM_DIR_START;
12577 offset < TG3_NVM_DIR_END;
12578 offset += TG3_NVM_DIRENT_SIZE) {
12579 if (tg3_nvram_read(tp, offset, &val))
12582 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12583 TG3_NVM_DIRTYPE_EXTVPD)
12587 if (offset != TG3_NVM_DIR_END) {
12588 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12589 if (tg3_nvram_read(tp, offset + 4, &offset))
12592 offset = tg3_nvram_logical_addr(tp, offset);
12596 if (!offset || !len) {
12597 offset = TG3_NVM_VPD_OFF;
12598 len = TG3_NVM_VPD_LEN;
12601 buf = kmalloc(len, GFP_KERNEL);
12605 if (magic == TG3_EEPROM_MAGIC) {
12606 for (i = 0; i < len; i += 4) {
12607 /* The data is in little-endian format in NVRAM.
12608 * Use the big-endian read routines to preserve
12609 * the byte order as it exists in NVRAM.
12611 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12617 unsigned int pos = 0;
12619 ptr = (u8 *)&buf[0];
12620 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12621 cnt = pci_read_vpd(tp->pdev, pos,
12623 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12641 #define NVRAM_TEST_SIZE 0x100
12642 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12643 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12644 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12645 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12646 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12647 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12648 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12649 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12651 static int tg3_test_nvram(struct tg3 *tp)
12653 u32 csum, magic, len;
12655 int i, j, k, err = 0, size;
12657 if (tg3_flag(tp, NO_NVRAM))
12660 if (tg3_nvram_read(tp, 0, &magic) != 0)
12663 if (magic == TG3_EEPROM_MAGIC)
12664 size = NVRAM_TEST_SIZE;
12665 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12666 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12667 TG3_EEPROM_SB_FORMAT_1) {
12668 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12669 case TG3_EEPROM_SB_REVISION_0:
12670 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12672 case TG3_EEPROM_SB_REVISION_2:
12673 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12675 case TG3_EEPROM_SB_REVISION_3:
12676 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12678 case TG3_EEPROM_SB_REVISION_4:
12679 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12681 case TG3_EEPROM_SB_REVISION_5:
12682 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12684 case TG3_EEPROM_SB_REVISION_6:
12685 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12692 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12693 size = NVRAM_SELFBOOT_HW_SIZE;
12697 buf = kmalloc(size, GFP_KERNEL);
12702 for (i = 0, j = 0; i < size; i += 4, j++) {
12703 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12710 /* Selfboot format */
12711 magic = be32_to_cpu(buf[0]);
12712 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12713 TG3_EEPROM_MAGIC_FW) {
12714 u8 *buf8 = (u8 *) buf, csum8 = 0;
12716 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12717 TG3_EEPROM_SB_REVISION_2) {
12718 /* For rev 2, the csum doesn't include the MBA. */
12719 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12721 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12724 for (i = 0; i < size; i++)
12737 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12738 TG3_EEPROM_MAGIC_HW) {
12739 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12740 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12741 u8 *buf8 = (u8 *) buf;
12743 /* Separate the parity bits and the data bytes. */
12744 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12745 if ((i == 0) || (i == 8)) {
12749 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12750 parity[k++] = buf8[i] & msk;
12752 } else if (i == 16) {
12756 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12757 parity[k++] = buf8[i] & msk;
12760 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12761 parity[k++] = buf8[i] & msk;
12764 data[j++] = buf8[i];
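/* Verify odd parity over each data byte and its parity bit: a byte
 * with an odd population count must carry a clear parity bit, and an
 * even-weight byte must carry a set one.
 */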
12768 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12769 u8 hw8 = hweight8(data[i]);
12771 if ((hw8 & 0x1) && parity[i])
12773 else if (!(hw8 & 0x1) && !parity[i])
12782 /* Bootstrap checksum at offset 0x10 */
12783 csum = calc_crc((unsigned char *) buf, 0x10);
12784 if (csum != le32_to_cpu(buf[0x10/4]))
12787 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12788 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12789 if (csum != le32_to_cpu(buf[0xfc/4]))
12794 buf = tg3_vpd_readblock(tp, &len);
12798 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12800 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12804 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12807 i += PCI_VPD_LRDT_TAG_SIZE;
12808 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12809 PCI_VPD_RO_KEYWORD_CHKSUM);
12813 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12815 for (i = 0; i <= j; i++)
12816 csum8 += ((u8 *)buf)[i];
12830 #define TG3_SERDES_TIMEOUT_SEC 2
12831 #define TG3_COPPER_TIMEOUT_SEC 6
12833 static int tg3_test_link(struct tg3 *tp)
12837 if (!netif_running(tp->dev))
12840 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12841 max = TG3_SERDES_TIMEOUT_SEC;
12843 max = TG3_COPPER_TIMEOUT_SEC;
12845 for (i = 0; i < max; i++) {
12849 if (msleep_interruptible(1000))
12856 /* Only test the commonly used registers */
12857 static int tg3_test_registers(struct tg3 *tp)
12859 int i, is_5705, is_5750;
12860 u32 offset, read_mask, write_mask, val, save_val, read_val;
12864 #define TG3_FL_5705 0x1
12865 #define TG3_FL_NOT_5705 0x2
12866 #define TG3_FL_NOT_5788 0x4
12867 #define TG3_FL_NOT_5750 0x8
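/* Each table entry is { offset, applicability flags, read-only bit
 * mask, read/write bit mask }. The loop below writes all zeros and
 * then all ones to each register and verifies that only the
 * read/write bits respond while the read-only bits hold their value.
 */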
12871 /* MAC Control Registers */
12872 { MAC_MODE, TG3_FL_NOT_5705,
12873 0x00000000, 0x00ef6f8c },
12874 { MAC_MODE, TG3_FL_5705,
12875 0x00000000, 0x01ef6b8c },
12876 { MAC_STATUS, TG3_FL_NOT_5705,
12877 0x03800107, 0x00000000 },
12878 { MAC_STATUS, TG3_FL_5705,
12879 0x03800100, 0x00000000 },
12880 { MAC_ADDR_0_HIGH, 0x0000,
12881 0x00000000, 0x0000ffff },
12882 { MAC_ADDR_0_LOW, 0x0000,
12883 0x00000000, 0xffffffff },
12884 { MAC_RX_MTU_SIZE, 0x0000,
12885 0x00000000, 0x0000ffff },
12886 { MAC_TX_MODE, 0x0000,
12887 0x00000000, 0x00000070 },
12888 { MAC_TX_LENGTHS, 0x0000,
12889 0x00000000, 0x00003fff },
12890 { MAC_RX_MODE, TG3_FL_NOT_5705,
12891 0x00000000, 0x000007fc },
12892 { MAC_RX_MODE, TG3_FL_5705,
12893 0x00000000, 0x000007dc },
12894 { MAC_HASH_REG_0, 0x0000,
12895 0x00000000, 0xffffffff },
12896 { MAC_HASH_REG_1, 0x0000,
12897 0x00000000, 0xffffffff },
12898 { MAC_HASH_REG_2, 0x0000,
12899 0x00000000, 0xffffffff },
12900 { MAC_HASH_REG_3, 0x0000,
12901 0x00000000, 0xffffffff },
12903 /* Receive Data and Receive BD Initiator Control Registers. */
12904 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12905 0x00000000, 0xffffffff },
12906 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12907 0x00000000, 0xffffffff },
12908 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12909 0x00000000, 0x00000003 },
12910 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12911 0x00000000, 0xffffffff },
12912 { RCVDBDI_STD_BD+0, 0x0000,
12913 0x00000000, 0xffffffff },
12914 { RCVDBDI_STD_BD+4, 0x0000,
12915 0x00000000, 0xffffffff },
12916 { RCVDBDI_STD_BD+8, 0x0000,
12917 0x00000000, 0xffff0002 },
12918 { RCVDBDI_STD_BD+0xc, 0x0000,
12919 0x00000000, 0xffffffff },
12921 /* Receive BD Initiator Control Registers. */
12922 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12923 0x00000000, 0xffffffff },
12924 { RCVBDI_STD_THRESH, TG3_FL_5705,
12925 0x00000000, 0x000003ff },
12926 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12927 0x00000000, 0xffffffff },
12929 /* Host Coalescing Control Registers. */
12930 { HOSTCC_MODE, TG3_FL_NOT_5705,
12931 0x00000000, 0x00000004 },
12932 { HOSTCC_MODE, TG3_FL_5705,
12933 0x00000000, 0x000000f6 },
12934 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12935 0x00000000, 0xffffffff },
12936 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12937 0x00000000, 0x000003ff },
12938 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12939 0x00000000, 0xffffffff },
12940 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12941 0x00000000, 0x000003ff },
12942 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12943 0x00000000, 0xffffffff },
12944 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12945 0x00000000, 0x000000ff },
12946 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12947 0x00000000, 0xffffffff },
12948 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12949 0x00000000, 0x000000ff },
12950 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12951 0x00000000, 0xffffffff },
12952 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12953 0x00000000, 0xffffffff },
12954 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12955 0x00000000, 0xffffffff },
12956 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12957 0x00000000, 0x000000ff },
12958 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12959 0x00000000, 0xffffffff },
12960 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12961 0x00000000, 0x000000ff },
12962 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12963 0x00000000, 0xffffffff },
12964 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12965 0x00000000, 0xffffffff },
12966 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12967 0x00000000, 0xffffffff },
12968 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12969 0x00000000, 0xffffffff },
12970 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12971 0x00000000, 0xffffffff },
12972 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12973 0xffffffff, 0x00000000 },
12974 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12975 0xffffffff, 0x00000000 },
12977 /* Buffer Manager Control Registers. */
12978 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12979 0x00000000, 0x007fff80 },
12980 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12981 0x00000000, 0x007fffff },
12982 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12983 0x00000000, 0x0000003f },
12984 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12985 0x00000000, 0x000001ff },
12986 { BUFMGR_MB_HIGH_WATER, 0x0000,
12987 0x00000000, 0x000001ff },
12988 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12989 0xffffffff, 0x00000000 },
12990 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12991 0xffffffff, 0x00000000 },
12993 /* Mailbox Registers */
12994 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12995 0x00000000, 0x000001ff },
12996 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12997 0x00000000, 0x000001ff },
12998 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12999 0x00000000, 0x000007ff },
13000 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13001 0x00000000, 0x000001ff },
13003 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
13006 is_5705 = is_5750 = 0;
13007 if (tg3_flag(tp, 5705_PLUS)) {
13009 if (tg3_flag(tp, 5750_PLUS))
13013 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13014 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13017 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13020 if (tg3_flag(tp, IS_5788) &&
13021 (reg_tbl[i].flags & TG3_FL_NOT_5788))
13024 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13027 offset = (u32) reg_tbl[i].offset;
13028 read_mask = reg_tbl[i].read_mask;
13029 write_mask = reg_tbl[i].write_mask;
13031 /* Save the original register content */
13032 save_val = tr32(offset);
13034 /* Determine the read-only value. */
13035 read_val = save_val & read_mask;
13037 /* Write zero to the register, then make sure the read-only bits
13038 * are not changed and the read/write bits are all zeros.
13042 val = tr32(offset);
13044 /* Test the read-only and read/write bits. */
13045 if (((val & read_mask) != read_val) || (val & write_mask))
13048 /* Write ones to all the bits defined by RdMask and WrMask, then
13049 * make sure the read-only bits are not changed and the
13050 * read/write bits are all ones.
13052 tw32(offset, read_mask | write_mask);
13054 val = tr32(offset);
13056 /* Test the read-only bits. */
13057 if ((val & read_mask) != read_val)
13060 /* Test the read/write bits. */
13061 if ((val & write_mask) != write_mask)
13064 tw32(offset, save_val);
13070 if (netif_msg_hw(tp))
13071 netdev_err(tp->dev,
13072 "Register test failed at offset %x\n", offset);
13073 tw32(offset, save_val);
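/* Write each test pattern to every word of the given region and read
 * it straight back, failing on the first mismatch.
 */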
13077 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13079 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13083 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13084 for (j = 0; j < len; j += 4) {
13087 tg3_write_mem(tp, offset + j, test_pattern[i]);
13088 tg3_read_mem(tp, offset + j, &val);
13089 if (val != test_pattern[i])
13096 static int tg3_test_memory(struct tg3 *tp)
13098 static struct mem_entry {
13101 } mem_tbl_570x[] = {
13102 { 0x00000000, 0x00b50},
13103 { 0x00002000, 0x1c000},
13104 { 0xffffffff, 0x00000}
13105 }, mem_tbl_5705[] = {
13106 { 0x00000100, 0x0000c},
13107 { 0x00000200, 0x00008},
13108 { 0x00004000, 0x00800},
13109 { 0x00006000, 0x01000},
13110 { 0x00008000, 0x02000},
13111 { 0x00010000, 0x0e000},
13112 { 0xffffffff, 0x00000}
13113 }, mem_tbl_5755[] = {
13114 { 0x00000200, 0x00008},
13115 { 0x00004000, 0x00800},
13116 { 0x00006000, 0x00800},
13117 { 0x00008000, 0x02000},
13118 { 0x00010000, 0x0c000},
13119 { 0xffffffff, 0x00000}
13120 }, mem_tbl_5906[] = {
13121 { 0x00000200, 0x00008},
13122 { 0x00004000, 0x00400},
13123 { 0x00006000, 0x00400},
13124 { 0x00008000, 0x01000},
13125 { 0x00010000, 0x01000},
13126 { 0xffffffff, 0x00000}
13127 }, mem_tbl_5717[] = {
13128 { 0x00000200, 0x00008},
13129 { 0x00010000, 0x0a000},
13130 { 0x00020000, 0x13c00},
13131 { 0xffffffff, 0x00000}
13132 }, mem_tbl_57765[] = {
13133 { 0x00000200, 0x00008},
13134 { 0x00004000, 0x00800},
13135 { 0x00006000, 0x09800},
13136 { 0x00010000, 0x0a000},
13137 { 0xffffffff, 0x00000}
13139 struct mem_entry *mem_tbl;
13143 if (tg3_flag(tp, 5717_PLUS))
13144 mem_tbl = mem_tbl_5717;
13145 else if (tg3_flag(tp, 57765_CLASS) ||
13146 tg3_asic_rev(tp) == ASIC_REV_5762)
13147 mem_tbl = mem_tbl_57765;
13148 else if (tg3_flag(tp, 5755_PLUS))
13149 mem_tbl = mem_tbl_5755;
13150 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13151 mem_tbl = mem_tbl_5906;
13152 else if (tg3_flag(tp, 5705_PLUS))
13153 mem_tbl = mem_tbl_5705;
13155 mem_tbl = mem_tbl_570x;
13157 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13158 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13166 #define TG3_TSO_MSS 500
13168 #define TG3_TSO_IP_HDR_LEN 20
13169 #define TG3_TSO_TCP_HDR_LEN 20
13170 #define TG3_TSO_TCP_OPT_LEN 12
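/* Canned header template for the TSO loopback frame: an IPv4 header
 * followed by a TCP header carrying a 12-byte timestamp option,
 * addressed 10.0.0.1 -> 10.0.0.2.
 */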
13172 static const u8 tg3_tso_header[] = {
13174 0x45, 0x00, 0x00, 0x00,
13175 0x00, 0x00, 0x40, 0x00,
13176 0x40, 0x06, 0x00, 0x00,
13177 0x0a, 0x00, 0x00, 0x01,
13178 0x0a, 0x00, 0x00, 0x02,
13179 0x0d, 0x00, 0xe0, 0x00,
13180 0x00, 0x00, 0x01, 0x00,
13181 0x00, 0x00, 0x02, 0x00,
13182 0x80, 0x10, 0x10, 0x00,
13183 0x14, 0x09, 0x00, 0x00,
13184 0x01, 0x01, 0x08, 0x0a,
13185 0x11, 0x11, 0x11, 0x11,
13186 0x11, 0x11, 0x11, 0x11,
13189 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13191 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13192 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13194 struct sk_buff *skb;
13195 u8 *tx_data, *rx_data;
13197 int num_pkts, tx_len, rx_len, i, err;
13198 struct tg3_rx_buffer_desc *desc;
13199 struct tg3_napi *tnapi, *rnapi;
13200 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13202 tnapi = &tp->napi[0];
13203 rnapi = &tp->napi[0];
13204 if (tp->irq_cnt > 1) {
13205 if (tg3_flag(tp, ENABLE_RSS))
13206 rnapi = &tp->napi[1];
13207 if (tg3_flag(tp, ENABLE_TSS))
13208 tnapi = &tp->napi[1];
13210 coal_now = tnapi->coal_now | rnapi->coal_now;
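/* Build a test frame addressed to ourselves, post it on the send
 * ring, force a coalescing event, and poll the status block until
 * the TX consumer and RX producer indices show the frame completed
 * the round trip. The payload is then compared byte for byte.
 */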
13215 skb = netdev_alloc_skb(tp->dev, tx_len);
13219 tx_data = skb_put(skb, tx_len);
13220 memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13221 memset(tx_data + ETH_ALEN, 0x0, 8);
13223 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13225 if (tso_loopback) {
13226 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13228 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13229 TG3_TSO_TCP_OPT_LEN;
13231 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13232 sizeof(tg3_tso_header));
13235 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13236 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13238 /* Set the total length field in the IP header */
13239 iph->tot_len = htons((u16)(mss + hdr_len));
13241 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13242 TXD_FLAG_CPU_POST_DMA);
13244 if (tg3_flag(tp, HW_TSO_1) ||
13245 tg3_flag(tp, HW_TSO_2) ||
13246 tg3_flag(tp, HW_TSO_3)) {
13248 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13249 th = (struct tcphdr *)&tx_data[val];
13252 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13254 if (tg3_flag(tp, HW_TSO_3)) {
13255 mss |= (hdr_len & 0xc) << 12;
13256 if (hdr_len & 0x10)
13257 base_flags |= 0x00000010;
13258 base_flags |= (hdr_len & 0x3e0) << 5;
13259 } else if (tg3_flag(tp, HW_TSO_2))
13260 mss |= hdr_len << 9;
13261 else if (tg3_flag(tp, HW_TSO_1) ||
13262 tg3_asic_rev(tp) == ASIC_REV_5705) {
13263 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13265 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13268 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13271 data_off = ETH_HLEN;
13273 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13274 tx_len > VLAN_ETH_FRAME_LEN)
13275 base_flags |= TXD_FLAG_JMB_PKT;
13278 for (i = data_off; i < tx_len; i++)
13279 tx_data[i] = (u8) (i & 0xff);
13281 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13282 if (pci_dma_mapping_error(tp->pdev, map)) {
13283 dev_kfree_skb(skb);
13287 val = tnapi->tx_prod;
13288 tnapi->tx_buffers[val].skb = skb;
13289 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13291 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13296 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13298 budget = tg3_tx_avail(tnapi);
13299 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13300 base_flags | TXD_FLAG_END, mss, 0)) {
13301 tnapi->tx_buffers[val].skb = NULL;
13302 dev_kfree_skb(skb);
13308 /* Sync BD data before updating mailbox */
13311 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13312 tr32_mailbox(tnapi->prodmbox);
13316 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13317 for (i = 0; i < 35; i++) {
13318 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13323 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13324 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13325 if ((tx_idx == tnapi->tx_prod) &&
13326 (rx_idx == (rx_start_idx + num_pkts)))
13330 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13331 dev_kfree_skb(skb);
13333 if (tx_idx != tnapi->tx_prod)
13336 if (rx_idx != rx_start_idx + num_pkts)
13340 while (rx_idx != rx_start_idx) {
13341 desc = &rnapi->rx_rcb[rx_start_idx++];
13342 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13343 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13345 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13346 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13349 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13352 if (!tso_loopback) {
13353 if (rx_len != tx_len)
13356 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13357 if (opaque_key != RXD_OPAQUE_RING_STD)
13360 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13363 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13364 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13365 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13369 if (opaque_key == RXD_OPAQUE_RING_STD) {
13370 rx_data = tpr->rx_std_buffers[desc_idx].data;
13371 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13373 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13374 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13375 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13380 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13381 PCI_DMA_FROMDEVICE);
13383 rx_data += TG3_RX_OFFSET(tp);
13384 for (i = data_off; i < rx_len; i++, val++) {
13385 if (*(rx_data + i) != (u8) (val & 0xff))
13392 /* tg3_free_rings will unmap and free the rx_data */
13397 #define TG3_STD_LOOPBACK_FAILED 1
13398 #define TG3_JMB_LOOPBACK_FAILED 2
13399 #define TG3_TSO_LOOPBACK_FAILED 4
13400 #define TG3_LOOPBACK_FAILED \
13401 (TG3_STD_LOOPBACK_FAILED | \
13402 TG3_JMB_LOOPBACK_FAILED | \
13403 TG3_TSO_LOOPBACK_FAILED)
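/* The loopback suite runs at up to three levels: internal MAC
 * loopback (skipped on 5780 and CPMU-equipped chips), internal PHY
 * loopback, and optionally external loopback, each with standard,
 * TSO, and jumbo frame sizes where the hardware supports them.
 */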
13405 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13409 u32 jmb_pkt_sz = 9000;
13412 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13414 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13415 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13417 if (!netif_running(tp->dev)) {
13418 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13419 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13421 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13425 err = tg3_reset_hw(tp, true);
13427 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13428 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13430 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13434 if (tg3_flag(tp, ENABLE_RSS)) {
13437 /* Reroute all rx packets to the 1st queue */
13438 for (i = MAC_RSS_INDIR_TBL_0;
13439 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13443 /* HW errata - mac loopback fails in some cases on 5780.
13444 * Normal traffic and PHY loopback are not affected by
13445 * errata. Also, the MAC loopback test is deprecated for
13446 * all newer ASIC revisions.
13448 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13449 !tg3_flag(tp, CPMU_PRESENT)) {
13450 tg3_mac_loopback(tp, true);
13452 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13453 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13455 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13456 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13457 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13459 tg3_mac_loopback(tp, false);
13462 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13463 !tg3_flag(tp, USE_PHYLIB)) {
13466 tg3_phy_lpbk_set(tp, 0, false);
13468 /* Wait for link */
13469 for (i = 0; i < 100; i++) {
13470 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13475 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13476 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13477 if (tg3_flag(tp, TSO_CAPABLE) &&
13478 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13479 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13480 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13481 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13482 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13485 tg3_phy_lpbk_set(tp, 0, true);
13487 /* All link indications report up, but the hardware
13488 * isn't really ready for about 20 msec. Double it to be sure. */
13493 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13494 data[TG3_EXT_LOOPB_TEST] |=
13495 TG3_STD_LOOPBACK_FAILED;
13496 if (tg3_flag(tp, TSO_CAPABLE) &&
13497 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13498 data[TG3_EXT_LOOPB_TEST] |=
13499 TG3_TSO_LOOPBACK_FAILED;
13500 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13501 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13502 data[TG3_EXT_LOOPB_TEST] |=
13503 TG3_JMB_LOOPBACK_FAILED;
13506 /* Re-enable gphy autopowerdown. */
13507 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13508 tg3_phy_toggle_apd(tp, true);
13511 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13512 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13515 tp->phy_flags |= eee_cap;
13520 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13523 struct tg3 *tp = netdev_priv(dev);
13524 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13526 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13527 if (tg3_power_up(tp)) {
13528 etest->flags |= ETH_TEST_FL_FAILED;
13529 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13532 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13535 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13537 if (tg3_test_nvram(tp) != 0) {
13538 etest->flags |= ETH_TEST_FL_FAILED;
13539 data[TG3_NVRAM_TEST] = 1;
13541 if (!doextlpbk && tg3_test_link(tp)) {
13542 etest->flags |= ETH_TEST_FL_FAILED;
13543 data[TG3_LINK_TEST] = 1;
13545 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13546 int err, err2 = 0, irq_sync = 0;
13548 if (netif_running(dev)) {
13550 tg3_netif_stop(tp);
13554 tg3_full_lock(tp, irq_sync);
13555 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13556 err = tg3_nvram_lock(tp);
13557 tg3_halt_cpu(tp, RX_CPU_BASE);
13558 if (!tg3_flag(tp, 5705_PLUS))
13559 tg3_halt_cpu(tp, TX_CPU_BASE);
13561 tg3_nvram_unlock(tp);
13563 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13566 if (tg3_test_registers(tp) != 0) {
13567 etest->flags |= ETH_TEST_FL_FAILED;
13568 data[TG3_REGISTER_TEST] = 1;
13571 if (tg3_test_memory(tp) != 0) {
13572 etest->flags |= ETH_TEST_FL_FAILED;
13573 data[TG3_MEMORY_TEST] = 1;
13577 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13579 if (tg3_test_loopback(tp, data, doextlpbk))
13580 etest->flags |= ETH_TEST_FL_FAILED;
13582 tg3_full_unlock(tp);
13584 if (tg3_test_interrupt(tp) != 0) {
13585 etest->flags |= ETH_TEST_FL_FAILED;
13586 data[TG3_INTERRUPT_TEST] = 1;
13589 tg3_full_lock(tp, 0);
13591 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13592 if (netif_running(dev)) {
13593 tg3_flag_set(tp, INIT_COMPLETE);
13594 err2 = tg3_restart_hw(tp, true);
13596 tg3_netif_start(tp);
13599 tg3_full_unlock(tp);
13601 if (irq_sync && !err2)
13604 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13605 tg3_power_down_prepare(tp);
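/* SIOCSHWTSTAMP handler: map the requested hwtstamp_config rx_filter
 * onto the corresponding TG3_RX_PTP_CTL bits, program them if the
 * device is up, and latch the TX timestamping enable flag.
 */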
13609 static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13611 struct tg3 *tp = netdev_priv(dev);
13612 struct hwtstamp_config stmpconf;
13614 if (!tg3_flag(tp, PTP_CAPABLE))
13615 return -EOPNOTSUPP;
13617 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13620 if (stmpconf.flags)
13623 if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13624 stmpconf.tx_type != HWTSTAMP_TX_OFF)
13627 switch (stmpconf.rx_filter) {
13628 case HWTSTAMP_FILTER_NONE:
13631 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13632 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13633 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13635 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13636 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13637 TG3_RX_PTP_CTL_SYNC_EVNT;
13639 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13640 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13641 TG3_RX_PTP_CTL_DELAY_REQ;
13643 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13644 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13645 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13647 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13648 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13649 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13651 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13652 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13653 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13655 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13656 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13657 TG3_RX_PTP_CTL_SYNC_EVNT;
13659 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13660 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13661 TG3_RX_PTP_CTL_SYNC_EVNT;
13663 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13664 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13665 TG3_RX_PTP_CTL_SYNC_EVNT;
13667 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13668 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13669 TG3_RX_PTP_CTL_DELAY_REQ;
13671 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13672 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13673 TG3_RX_PTP_CTL_DELAY_REQ;
13675 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13676 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13677 TG3_RX_PTP_CTL_DELAY_REQ;
13683 if (netif_running(dev) && tp->rxptpctl)
13684 tw32(TG3_RX_PTP_CTL,
13685 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13687 if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13688 tg3_flag_set(tp, TX_TSTAMP_EN);
13690 tg3_flag_clear(tp, TX_TSTAMP_EN);
13692 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13696 static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13698 struct tg3 *tp = netdev_priv(dev);
13699 struct hwtstamp_config stmpconf;
13701 if (!tg3_flag(tp, PTP_CAPABLE))
13702 return -EOPNOTSUPP;
13704 stmpconf.flags = 0;
13705 stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13706 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13708 switch (tp->rxptpctl) {
13710 stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13712 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13713 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13715 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13716 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13718 case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13719 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13721 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13722 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13724 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13725 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13727 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13728 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13730 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13731 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13733 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13734 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13736 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13737 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13739 case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13740 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13742 case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13743 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13745 case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13746 stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13753 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13757 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13759 struct mii_ioctl_data *data = if_mii(ifr);
13760 struct tg3 *tp = netdev_priv(dev);
13763 if (tg3_flag(tp, USE_PHYLIB)) {
13764 struct phy_device *phydev;
13765 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13767 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13768 return phy_mii_ioctl(phydev, ifr, cmd);
13773 data->phy_id = tp->phy_addr;
13776 case SIOCGMIIREG: {
13779 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13780 break; /* We have no PHY */
13782 if (!netif_running(dev))
13785 spin_lock_bh(&tp->lock);
13786 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13787 data->reg_num & 0x1f, &mii_regval);
13788 spin_unlock_bh(&tp->lock);
13790 data->val_out = mii_regval;
13796 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13797 break; /* We have no PHY */
13799 if (!netif_running(dev))
13802 spin_lock_bh(&tp->lock);
13803 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13804 data->reg_num & 0x1f, data->val_in);
13805 spin_unlock_bh(&tp->lock);
13809 case SIOCSHWTSTAMP:
13810 return tg3_hwtstamp_set(dev, ifr);
13812 case SIOCGHWTSTAMP:
13813 return tg3_hwtstamp_get(dev, ifr);
13819 return -EOPNOTSUPP;
13822 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13824 struct tg3 *tp = netdev_priv(dev);
13826 memcpy(ec, &tp->coal, sizeof(*ec));
13830 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13832 struct tg3 *tp = netdev_priv(dev);
13833 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13834 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
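/* Only pre-5705 hardware supports per-interrupt coalescing ticks and
 * statistics coalescing; on later chips these limits stay zero, so
 * any nonzero request is rejected by the range checks below.
 */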
13836 if (!tg3_flag(tp, 5705_PLUS)) {
13837 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13838 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13839 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13840 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13843 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13844 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13845 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13846 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13847 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13848 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13849 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13850 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13851 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13852 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13855 /* No rx interrupts will be generated if both are zero */
13856 if ((ec->rx_coalesce_usecs == 0) &&
13857 (ec->rx_max_coalesced_frames == 0))
13860 /* No tx interrupts will be generated if both are zero */
13861 if ((ec->tx_coalesce_usecs == 0) &&
13862 (ec->tx_max_coalesced_frames == 0))
13865 /* Only copy relevant parameters, ignore all others. */
13866 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13867 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13868 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13869 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13870 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13871 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13872 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13873 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13874 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13876 if (netif_running(dev)) {
13877 tg3_full_lock(tp, 0);
13878 __tg3_set_coalesce(tp, &tp->coal);
13879 tg3_full_unlock(tp);
13884 static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
13886 struct tg3 *tp = netdev_priv(dev);
13888 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13889 netdev_warn(tp->dev, "Board does not support EEE!\n");
13890 return -EOPNOTSUPP;
13893 if (edata->advertised != tp->eee.advertised) {
13894 netdev_warn(tp->dev,
13895 "Direct manipulation of EEE advertisement is not supported\n");
13899 if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
13900 netdev_warn(tp->dev,
13901 "Maximal Tx Lpi timer supported is %#x(u)\n",
13902 TG3_CPMU_DBTMR1_LNKIDLE_MAX);
13908 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
13909 tg3_warn_mgmt_link_flap(tp);
13911 if (netif_running(tp->dev)) {
13912 tg3_full_lock(tp, 0);
13915 tg3_full_unlock(tp);
13921 static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
13923 struct tg3 *tp = netdev_priv(dev);
13925 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
13926 netdev_warn(tp->dev,
13927 "Board does not support EEE!\n");
13928 return -EOPNOTSUPP;
13935 static const struct ethtool_ops tg3_ethtool_ops = {
13936 .get_settings = tg3_get_settings,
13937 .set_settings = tg3_set_settings,
13938 .get_drvinfo = tg3_get_drvinfo,
13939 .get_regs_len = tg3_get_regs_len,
13940 .get_regs = tg3_get_regs,
13941 .get_wol = tg3_get_wol,
13942 .set_wol = tg3_set_wol,
13943 .get_msglevel = tg3_get_msglevel,
13944 .set_msglevel = tg3_set_msglevel,
13945 .nway_reset = tg3_nway_reset,
13946 .get_link = ethtool_op_get_link,
13947 .get_eeprom_len = tg3_get_eeprom_len,
13948 .get_eeprom = tg3_get_eeprom,
13949 .set_eeprom = tg3_set_eeprom,
13950 .get_ringparam = tg3_get_ringparam,
13951 .set_ringparam = tg3_set_ringparam,
13952 .get_pauseparam = tg3_get_pauseparam,
13953 .set_pauseparam = tg3_set_pauseparam,
13954 .self_test = tg3_self_test,
13955 .get_strings = tg3_get_strings,
13956 .set_phys_id = tg3_set_phys_id,
13957 .get_ethtool_stats = tg3_get_ethtool_stats,
13958 .get_coalesce = tg3_get_coalesce,
13959 .set_coalesce = tg3_set_coalesce,
13960 .get_sset_count = tg3_get_sset_count,
13961 .get_rxnfc = tg3_get_rxnfc,
13962 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13963 .get_rxfh_indir = tg3_get_rxfh_indir,
13964 .set_rxfh_indir = tg3_set_rxfh_indir,
13965 .get_channels = tg3_get_channels,
13966 .set_channels = tg3_set_channels,
13967 .get_ts_info = tg3_get_ts_info,
13968 .get_eee = tg3_get_eee,
13969 .set_eee = tg3_set_eee,
13972 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13973 struct rtnl_link_stats64 *stats)
13975 struct tg3 *tp = netdev_priv(dev);
13977 spin_lock_bh(&tp->lock);
13978 if (!tp->hw_stats) {
13979 spin_unlock_bh(&tp->lock);
13980 return &tp->net_stats_prev;
13983 tg3_get_nstats(tp, stats);
13984 spin_unlock_bh(&tp->lock);
13989 static void tg3_set_rx_mode(struct net_device *dev)
13991 struct tg3 *tp = netdev_priv(dev);
13993 if (!netif_running(dev))
13996 tg3_full_lock(tp, 0);
13997 __tg3_set_rx_mode(dev);
13998 tg3_full_unlock(tp);
14001 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14004 dev->mtu = new_mtu;
14006 if (new_mtu > ETH_DATA_LEN) {
14007 if (tg3_flag(tp, 5780_CLASS)) {
14008 netdev_update_features(dev);
14009 tg3_flag_clear(tp, TSO_CAPABLE);
14011 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14014 if (tg3_flag(tp, 5780_CLASS)) {
14015 tg3_flag_set(tp, TSO_CAPABLE);
14016 netdev_update_features(dev);
14018 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14022 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14024 struct tg3 *tp = netdev_priv(dev);
14026 bool reset_phy = false;
14028 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14031 if (!netif_running(dev)) {
14032 /* We'll just catch it later when the device is up'd. */
14035 tg3_set_mtu(dev, tp, new_mtu);
14041 tg3_netif_stop(tp);
14043 tg3_full_lock(tp, 1);
14045 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14047 tg3_set_mtu(dev, tp, new_mtu);
14049 /* Reset PHY, otherwise the read DMA engine will be in a mode that
14050 * breaks all requests to 256 bytes.
14052 if (tg3_asic_rev(tp) == ASIC_REV_57766)
14055 err = tg3_restart_hw(tp, reset_phy);
14058 tg3_netif_start(tp);
14060 tg3_full_unlock(tp);
14068 static const struct net_device_ops tg3_netdev_ops = {
14069 .ndo_open = tg3_open,
14070 .ndo_stop = tg3_close,
14071 .ndo_start_xmit = tg3_start_xmit,
14072 .ndo_get_stats64 = tg3_get_stats64,
14073 .ndo_validate_addr = eth_validate_addr,
14074 .ndo_set_rx_mode = tg3_set_rx_mode,
14075 .ndo_set_mac_address = tg3_set_mac_addr,
14076 .ndo_do_ioctl = tg3_ioctl,
14077 .ndo_tx_timeout = tg3_tx_timeout,
14078 .ndo_change_mtu = tg3_change_mtu,
14079 .ndo_fix_features = tg3_fix_features,
14080 .ndo_set_features = tg3_set_features,
14081 #ifdef CONFIG_NET_POLL_CONTROLLER
14082 .ndo_poll_controller = tg3_poll_controller,
14086 static void tg3_get_eeprom_size(struct tg3 *tp)
14088 u32 cursize, val, magic;
14090 tp->nvram_size = EEPROM_CHIP_SIZE;
14092 if (tg3_nvram_read(tp, 0, &magic) != 0)
14095 if ((magic != TG3_EEPROM_MAGIC) &&
14096 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14097 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14100 /*
14101 * Size the chip by reading offsets at increasing powers of two.
14102 * When we encounter our validation signature, we know the addressing
14103 * has wrapped around, and thus have our chip size.
14104 */
14105 cursize = 0x10;
14107 while (cursize < tp->nvram_size) {
14108 if (tg3_nvram_read(tp, cursize, &val) != 0)
14117 tp->nvram_size = cursize;
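/* Worked example (illustrative only): starting at cursize = 0x10 the probe
 * offset doubles each pass; on a 512-byte EEPROM the read at offset 0x200
 * wraps around to offset 0 and returns the magic signature again, so 0x200
 * is recorded as the chip size.
 */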
14120 static void tg3_get_nvram_size(struct tg3 *tp)
14124 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14127 /* Selfboot format */
14128 if (val != TG3_EEPROM_MAGIC) {
14129 tg3_get_eeprom_size(tp);
14133 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14135 /* This is confusing. We want to operate on the
14136 * 16-bit value at offset 0xf2. The tg3_nvram_read()
14137 * call will read from NVRAM and byteswap the data
14138 * according to the byteswapping settings for all
14139 * other register accesses. This ensures the data we
14140 * want will always reside in the lower 16-bits.
14141 * However, the data in NVRAM is in LE format, which
14142 * means the data from the NVRAM read will always be
14143 * opposite the endianness of the CPU. The 16-bit
14144 * byteswap then brings the data to CPU endianness.
14146 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
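/* Worked example (illustrative only), following the comment above: a 512 KB
 * part stores the halfword 0x0200 at offset 0xf2 in LE byte order (00 02).
 * The register-style byteswap in tg3_nvram_read() hands it back as 0x0002 in
 * the low 16 bits, swab16() recovers 0x0200, and the multiply by 1024 yields
 * the size in bytes.
 */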
14150 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14153 static void tg3_get_nvram_info(struct tg3 *tp)
14157 nvcfg1 = tr32(NVRAM_CFG1);
14158 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14159 tg3_flag_set(tp, FLASH);
14161 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14162 tw32(NVRAM_CFG1, nvcfg1);
14165 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14166 tg3_flag(tp, 5780_CLASS)) {
14167 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14168 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14169 tp->nvram_jedecnum = JEDEC_ATMEL;
14170 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14171 tg3_flag_set(tp, NVRAM_BUFFERED);
14173 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14174 tp->nvram_jedecnum = JEDEC_ATMEL;
14175 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14177 case FLASH_VENDOR_ATMEL_EEPROM:
14178 tp->nvram_jedecnum = JEDEC_ATMEL;
14179 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14180 tg3_flag_set(tp, NVRAM_BUFFERED);
14182 case FLASH_VENDOR_ST:
14183 tp->nvram_jedecnum = JEDEC_ST;
14184 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14185 tg3_flag_set(tp, NVRAM_BUFFERED);
14187 case FLASH_VENDOR_SAIFUN:
14188 tp->nvram_jedecnum = JEDEC_SAIFUN;
14189 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14191 case FLASH_VENDOR_SST_SMALL:
14192 case FLASH_VENDOR_SST_LARGE:
14193 tp->nvram_jedecnum = JEDEC_SST;
14194 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14198 tp->nvram_jedecnum = JEDEC_ATMEL;
14199 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14200 tg3_flag_set(tp, NVRAM_BUFFERED);
14204 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14206 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14207 case FLASH_5752PAGE_SIZE_256:
14208 tp->nvram_pagesize = 256;
14210 case FLASH_5752PAGE_SIZE_512:
14211 tp->nvram_pagesize = 512;
14213 case FLASH_5752PAGE_SIZE_1K:
14214 tp->nvram_pagesize = 1024;
14216 case FLASH_5752PAGE_SIZE_2K:
14217 tp->nvram_pagesize = 2048;
14219 case FLASH_5752PAGE_SIZE_4K:
14220 tp->nvram_pagesize = 4096;
14222 case FLASH_5752PAGE_SIZE_264:
14223 tp->nvram_pagesize = 264;
14225 case FLASH_5752PAGE_SIZE_528:
14226 tp->nvram_pagesize = 528;
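/* Note (annotation, hedged): 264 and 528 are the native "power of two plus
 * eight" page sizes of Atmel AT45 DataFlash parts.  The callers below test
 * for exactly these two values to decide whether linear NVRAM offsets still
 * need page/byte address translation (the NO_NVRAM_ADDR_TRANS checks after
 * each tg3_nvram_get_pagesize() call).
 */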
14231 static void tg3_get_5752_nvram_info(struct tg3 *tp)
14235 nvcfg1 = tr32(NVRAM_CFG1);
14237 /* NVRAM protection for TPM */
14238 if (nvcfg1 & (1 << 27))
14239 tg3_flag_set(tp, PROTECTED_NVRAM);
14241 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14242 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14243 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14244 tp->nvram_jedecnum = JEDEC_ATMEL;
14245 tg3_flag_set(tp, NVRAM_BUFFERED);
14247 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14248 tp->nvram_jedecnum = JEDEC_ATMEL;
14249 tg3_flag_set(tp, NVRAM_BUFFERED);
14250 tg3_flag_set(tp, FLASH);
14252 case FLASH_5752VENDOR_ST_M45PE10:
14253 case FLASH_5752VENDOR_ST_M45PE20:
14254 case FLASH_5752VENDOR_ST_M45PE40:
14255 tp->nvram_jedecnum = JEDEC_ST;
14256 tg3_flag_set(tp, NVRAM_BUFFERED);
14257 tg3_flag_set(tp, FLASH);
14261 if (tg3_flag(tp, FLASH)) {
14262 tg3_nvram_get_pagesize(tp, nvcfg1);
14264 /* For eeprom, set pagesize to maximum eeprom size */
14265 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14267 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14268 tw32(NVRAM_CFG1, nvcfg1);
14272 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14274 u32 nvcfg1, protect = 0;
14276 nvcfg1 = tr32(NVRAM_CFG1);
14278 /* NVRAM protection for TPM */
14279 if (nvcfg1 & (1 << 27)) {
14280 tg3_flag_set(tp, PROTECTED_NVRAM);
14284 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14286 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14287 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14288 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14289 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14290 tp->nvram_jedecnum = JEDEC_ATMEL;
14291 tg3_flag_set(tp, NVRAM_BUFFERED);
14292 tg3_flag_set(tp, FLASH);
14293 tp->nvram_pagesize = 264;
14294 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14295 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14296 tp->nvram_size = (protect ? 0x3e200 :
14297 TG3_NVRAM_SIZE_512KB);
14298 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14299 tp->nvram_size = (protect ? 0x1f200 :
14300 TG3_NVRAM_SIZE_256KB);
14302 tp->nvram_size = (protect ? 0x1f200 :
14303 TG3_NVRAM_SIZE_128KB);
14305 case FLASH_5752VENDOR_ST_M45PE10:
14306 case FLASH_5752VENDOR_ST_M45PE20:
14307 case FLASH_5752VENDOR_ST_M45PE40:
14308 tp->nvram_jedecnum = JEDEC_ST;
14309 tg3_flag_set(tp, NVRAM_BUFFERED);
14310 tg3_flag_set(tp, FLASH);
14311 tp->nvram_pagesize = 256;
14312 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14313 tp->nvram_size = (protect ?
14314 TG3_NVRAM_SIZE_64KB :
14315 TG3_NVRAM_SIZE_128KB);
14316 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14317 tp->nvram_size = (protect ?
14318 TG3_NVRAM_SIZE_64KB :
14319 TG3_NVRAM_SIZE_256KB);
14321 tp->nvram_size = (protect ?
14322 TG3_NVRAM_SIZE_128KB :
14323 TG3_NVRAM_SIZE_512KB);
14328 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14332 nvcfg1 = tr32(NVRAM_CFG1);
14334 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14335 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14336 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14337 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14338 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14339 tp->nvram_jedecnum = JEDEC_ATMEL;
14340 tg3_flag_set(tp, NVRAM_BUFFERED);
14341 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14343 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14344 tw32(NVRAM_CFG1, nvcfg1);
14346 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14347 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14348 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14349 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14350 tp->nvram_jedecnum = JEDEC_ATMEL;
14351 tg3_flag_set(tp, NVRAM_BUFFERED);
14352 tg3_flag_set(tp, FLASH);
14353 tp->nvram_pagesize = 264;
14355 case FLASH_5752VENDOR_ST_M45PE10:
14356 case FLASH_5752VENDOR_ST_M45PE20:
14357 case FLASH_5752VENDOR_ST_M45PE40:
14358 tp->nvram_jedecnum = JEDEC_ST;
14359 tg3_flag_set(tp, NVRAM_BUFFERED);
14360 tg3_flag_set(tp, FLASH);
14361 tp->nvram_pagesize = 256;
14366 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14368 u32 nvcfg1, protect = 0;
14370 nvcfg1 = tr32(NVRAM_CFG1);
14372 /* NVRAM protection for TPM */
14373 if (nvcfg1 & (1 << 27)) {
14374 tg3_flag_set(tp, PROTECTED_NVRAM);
14378 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14380 case FLASH_5761VENDOR_ATMEL_ADB021D:
14381 case FLASH_5761VENDOR_ATMEL_ADB041D:
14382 case FLASH_5761VENDOR_ATMEL_ADB081D:
14383 case FLASH_5761VENDOR_ATMEL_ADB161D:
14384 case FLASH_5761VENDOR_ATMEL_MDB021D:
14385 case FLASH_5761VENDOR_ATMEL_MDB041D:
14386 case FLASH_5761VENDOR_ATMEL_MDB081D:
14387 case FLASH_5761VENDOR_ATMEL_MDB161D:
14388 tp->nvram_jedecnum = JEDEC_ATMEL;
14389 tg3_flag_set(tp, NVRAM_BUFFERED);
14390 tg3_flag_set(tp, FLASH);
14391 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14392 tp->nvram_pagesize = 256;
14394 case FLASH_5761VENDOR_ST_A_M45PE20:
14395 case FLASH_5761VENDOR_ST_A_M45PE40:
14396 case FLASH_5761VENDOR_ST_A_M45PE80:
14397 case FLASH_5761VENDOR_ST_A_M45PE16:
14398 case FLASH_5761VENDOR_ST_M_M45PE20:
14399 case FLASH_5761VENDOR_ST_M_M45PE40:
14400 case FLASH_5761VENDOR_ST_M_M45PE80:
14401 case FLASH_5761VENDOR_ST_M_M45PE16:
14402 tp->nvram_jedecnum = JEDEC_ST;
14403 tg3_flag_set(tp, NVRAM_BUFFERED);
14404 tg3_flag_set(tp, FLASH);
14405 tp->nvram_pagesize = 256;
14410 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14413 case FLASH_5761VENDOR_ATMEL_ADB161D:
14414 case FLASH_5761VENDOR_ATMEL_MDB161D:
14415 case FLASH_5761VENDOR_ST_A_M45PE16:
14416 case FLASH_5761VENDOR_ST_M_M45PE16:
14417 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14419 case FLASH_5761VENDOR_ATMEL_ADB081D:
14420 case FLASH_5761VENDOR_ATMEL_MDB081D:
14421 case FLASH_5761VENDOR_ST_A_M45PE80:
14422 case FLASH_5761VENDOR_ST_M_M45PE80:
14423 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14425 case FLASH_5761VENDOR_ATMEL_ADB041D:
14426 case FLASH_5761VENDOR_ATMEL_MDB041D:
14427 case FLASH_5761VENDOR_ST_A_M45PE40:
14428 case FLASH_5761VENDOR_ST_M_M45PE40:
14429 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14431 case FLASH_5761VENDOR_ATMEL_ADB021D:
14432 case FLASH_5761VENDOR_ATMEL_MDB021D:
14433 case FLASH_5761VENDOR_ST_A_M45PE20:
14434 case FLASH_5761VENDOR_ST_M_M45PE20:
14435 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14441 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14443 tp->nvram_jedecnum = JEDEC_ATMEL;
14444 tg3_flag_set(tp, NVRAM_BUFFERED);
14445 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14448 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14452 nvcfg1 = tr32(NVRAM_CFG1);
14454 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14455 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14456 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14457 tp->nvram_jedecnum = JEDEC_ATMEL;
14458 tg3_flag_set(tp, NVRAM_BUFFERED);
14459 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14461 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14462 tw32(NVRAM_CFG1, nvcfg1);
14464 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14465 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14466 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14467 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14468 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14469 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14470 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14471 tp->nvram_jedecnum = JEDEC_ATMEL;
14472 tg3_flag_set(tp, NVRAM_BUFFERED);
14473 tg3_flag_set(tp, FLASH);
14475 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14476 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14477 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14478 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14479 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14481 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14482 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14483 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14485 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14486 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14487 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14491 case FLASH_5752VENDOR_ST_M45PE10:
14492 case FLASH_5752VENDOR_ST_M45PE20:
14493 case FLASH_5752VENDOR_ST_M45PE40:
14494 tp->nvram_jedecnum = JEDEC_ST;
14495 tg3_flag_set(tp, NVRAM_BUFFERED);
14496 tg3_flag_set(tp, FLASH);
14498 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14499 case FLASH_5752VENDOR_ST_M45PE10:
14500 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14502 case FLASH_5752VENDOR_ST_M45PE20:
14503 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14505 case FLASH_5752VENDOR_ST_M45PE40:
14506 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14511 tg3_flag_set(tp, NO_NVRAM);
14515 tg3_nvram_get_pagesize(tp, nvcfg1);
14516 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14517 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14521 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14525 nvcfg1 = tr32(NVRAM_CFG1);
14527 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14528 case FLASH_5717VENDOR_ATMEL_EEPROM:
14529 case FLASH_5717VENDOR_MICRO_EEPROM:
14530 tp->nvram_jedecnum = JEDEC_ATMEL;
14531 tg3_flag_set(tp, NVRAM_BUFFERED);
14532 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14534 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14535 tw32(NVRAM_CFG1, nvcfg1);
14537 case FLASH_5717VENDOR_ATMEL_MDB011D:
14538 case FLASH_5717VENDOR_ATMEL_ADB011B:
14539 case FLASH_5717VENDOR_ATMEL_ADB011D:
14540 case FLASH_5717VENDOR_ATMEL_MDB021D:
14541 case FLASH_5717VENDOR_ATMEL_ADB021B:
14542 case FLASH_5717VENDOR_ATMEL_ADB021D:
14543 case FLASH_5717VENDOR_ATMEL_45USPT:
14544 tp->nvram_jedecnum = JEDEC_ATMEL;
14545 tg3_flag_set(tp, NVRAM_BUFFERED);
14546 tg3_flag_set(tp, FLASH);
14548 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14549 case FLASH_5717VENDOR_ATMEL_MDB021D:
14550 /* Detect size with tg3_nvram_get_size() */
14552 case FLASH_5717VENDOR_ATMEL_ADB021B:
14553 case FLASH_5717VENDOR_ATMEL_ADB021D:
14554 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14557 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14561 case FLASH_5717VENDOR_ST_M_M25PE10:
14562 case FLASH_5717VENDOR_ST_A_M25PE10:
14563 case FLASH_5717VENDOR_ST_M_M45PE10:
14564 case FLASH_5717VENDOR_ST_A_M45PE10:
14565 case FLASH_5717VENDOR_ST_M_M25PE20:
14566 case FLASH_5717VENDOR_ST_A_M25PE20:
14567 case FLASH_5717VENDOR_ST_M_M45PE20:
14568 case FLASH_5717VENDOR_ST_A_M45PE20:
14569 case FLASH_5717VENDOR_ST_25USPT:
14570 case FLASH_5717VENDOR_ST_45USPT:
14571 tp->nvram_jedecnum = JEDEC_ST;
14572 tg3_flag_set(tp, NVRAM_BUFFERED);
14573 tg3_flag_set(tp, FLASH);
14575 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14576 case FLASH_5717VENDOR_ST_M_M25PE20:
14577 case FLASH_5717VENDOR_ST_M_M45PE20:
14578 /* Detect size with tg3_nvram_get_size() */
14580 case FLASH_5717VENDOR_ST_A_M25PE20:
14581 case FLASH_5717VENDOR_ST_A_M45PE20:
14582 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14585 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14590 tg3_flag_set(tp, NO_NVRAM);
14594 tg3_nvram_get_pagesize(tp, nvcfg1);
14595 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14596 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14599 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14601 u32 nvcfg1, nvmpinstrp;
14603 nvcfg1 = tr32(NVRAM_CFG1);
14604 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14606 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14607 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14608 tg3_flag_set(tp, NO_NVRAM);
14612 switch (nvmpinstrp) {
14613 case FLASH_5762_EEPROM_HD:
14614 nvmpinstrp = FLASH_5720_EEPROM_HD;
14616 case FLASH_5762_EEPROM_LD:
14617 nvmpinstrp = FLASH_5720_EEPROM_LD;
14619 case FLASH_5720VENDOR_M_ST_M45PE20:
14620 /* This pinstrap supports multiple sizes, so force it
14621 * to read the actual size from location 0xf0.
14623 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14628 switch (nvmpinstrp) {
14629 case FLASH_5720_EEPROM_HD:
14630 case FLASH_5720_EEPROM_LD:
14631 tp->nvram_jedecnum = JEDEC_ATMEL;
14632 tg3_flag_set(tp, NVRAM_BUFFERED);
14634 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14635 tw32(NVRAM_CFG1, nvcfg1);
14636 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14637 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14639 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14641 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14642 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14643 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14644 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14645 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14646 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14647 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14648 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14649 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14650 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14651 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14652 case FLASH_5720VENDOR_ATMEL_45USPT:
14653 tp->nvram_jedecnum = JEDEC_ATMEL;
14654 tg3_flag_set(tp, NVRAM_BUFFERED);
14655 tg3_flag_set(tp, FLASH);
14657 switch (nvmpinstrp) {
14658 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14659 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14660 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14661 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14663 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14664 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14665 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14666 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14668 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14669 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14670 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14673 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14674 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14678 case FLASH_5720VENDOR_M_ST_M25PE10:
14679 case FLASH_5720VENDOR_M_ST_M45PE10:
14680 case FLASH_5720VENDOR_A_ST_M25PE10:
14681 case FLASH_5720VENDOR_A_ST_M45PE10:
14682 case FLASH_5720VENDOR_M_ST_M25PE20:
14683 case FLASH_5720VENDOR_M_ST_M45PE20:
14684 case FLASH_5720VENDOR_A_ST_M25PE20:
14685 case FLASH_5720VENDOR_A_ST_M45PE20:
14686 case FLASH_5720VENDOR_M_ST_M25PE40:
14687 case FLASH_5720VENDOR_M_ST_M45PE40:
14688 case FLASH_5720VENDOR_A_ST_M25PE40:
14689 case FLASH_5720VENDOR_A_ST_M45PE40:
14690 case FLASH_5720VENDOR_M_ST_M25PE80:
14691 case FLASH_5720VENDOR_M_ST_M45PE80:
14692 case FLASH_5720VENDOR_A_ST_M25PE80:
14693 case FLASH_5720VENDOR_A_ST_M45PE80:
14694 case FLASH_5720VENDOR_ST_25USPT:
14695 case FLASH_5720VENDOR_ST_45USPT:
14696 tp->nvram_jedecnum = JEDEC_ST;
14697 tg3_flag_set(tp, NVRAM_BUFFERED);
14698 tg3_flag_set(tp, FLASH);
14700 switch (nvmpinstrp) {
14701 case FLASH_5720VENDOR_M_ST_M25PE20:
14702 case FLASH_5720VENDOR_M_ST_M45PE20:
14703 case FLASH_5720VENDOR_A_ST_M25PE20:
14704 case FLASH_5720VENDOR_A_ST_M45PE20:
14705 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14707 case FLASH_5720VENDOR_M_ST_M25PE40:
14708 case FLASH_5720VENDOR_M_ST_M45PE40:
14709 case FLASH_5720VENDOR_A_ST_M25PE40:
14710 case FLASH_5720VENDOR_A_ST_M45PE40:
14711 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14713 case FLASH_5720VENDOR_M_ST_M25PE80:
14714 case FLASH_5720VENDOR_M_ST_M45PE80:
14715 case FLASH_5720VENDOR_A_ST_M25PE80:
14716 case FLASH_5720VENDOR_A_ST_M45PE80:
14717 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14720 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14721 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14726 tg3_flag_set(tp, NO_NVRAM);
14730 tg3_nvram_get_pagesize(tp, nvcfg1);
14731 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14732 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14734 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14737 if (tg3_nvram_read(tp, 0, &val))
14740 if (val != TG3_EEPROM_MAGIC &&
14741 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14742 tg3_flag_set(tp, NO_NVRAM);
14746 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14747 static void tg3_nvram_init(struct tg3 *tp)
14749 if (tg3_flag(tp, IS_SSB_CORE)) {
14750 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14751 tg3_flag_clear(tp, NVRAM);
14752 tg3_flag_clear(tp, NVRAM_BUFFERED);
14753 tg3_flag_set(tp, NO_NVRAM);
14757 tw32_f(GRC_EEPROM_ADDR,
14758 (EEPROM_ADDR_FSM_RESET |
14759 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14760 EEPROM_ADDR_CLKPERD_SHIFT)));
14764 /* Enable seeprom accesses. */
14765 tw32_f(GRC_LOCAL_CTRL,
14766 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14769 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14770 tg3_asic_rev(tp) != ASIC_REV_5701) {
14771 tg3_flag_set(tp, NVRAM);
14773 if (tg3_nvram_lock(tp)) {
14774 netdev_warn(tp->dev,
14775 "Cannot get nvram lock, %s failed\n",
14779 tg3_enable_nvram_access(tp);
14781 tp->nvram_size = 0;
14783 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14784 tg3_get_5752_nvram_info(tp);
14785 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14786 tg3_get_5755_nvram_info(tp);
14787 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14788 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14789 tg3_asic_rev(tp) == ASIC_REV_5785)
14790 tg3_get_5787_nvram_info(tp);
14791 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14792 tg3_get_5761_nvram_info(tp);
14793 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14794 tg3_get_5906_nvram_info(tp);
14795 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14796 tg3_flag(tp, 57765_CLASS))
14797 tg3_get_57780_nvram_info(tp);
14798 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14799 tg3_asic_rev(tp) == ASIC_REV_5719)
14800 tg3_get_5717_nvram_info(tp);
14801 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14802 tg3_asic_rev(tp) == ASIC_REV_5762)
14803 tg3_get_5720_nvram_info(tp);
14805 tg3_get_nvram_info(tp);
14807 if (tp->nvram_size == 0)
14808 tg3_get_nvram_size(tp);
14810 tg3_disable_nvram_access(tp);
14811 tg3_nvram_unlock(tp);
14814 tg3_flag_clear(tp, NVRAM);
14815 tg3_flag_clear(tp, NVRAM_BUFFERED);
14817 tg3_get_eeprom_size(tp);
14821 struct subsys_tbl_ent {
14822 u16 subsys_vendor, subsys_devid;
14823 u32 phy_id;
14824 };
14826 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14827 /* Broadcom boards. */
14828 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14829 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14830 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14831 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14832 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14833 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14834 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14835 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14836 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14837 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14838 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14839 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14840 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14841 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14842 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14843 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14844 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14845 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14846 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14847 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14848 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14849 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
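14851 /* 3com boards. */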
14852 { TG3PCI_SUBVENDOR_ID_3COM,
14853 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14854 { TG3PCI_SUBVENDOR_ID_3COM,
14855 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14856 { TG3PCI_SUBVENDOR_ID_3COM,
14857 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14858 { TG3PCI_SUBVENDOR_ID_3COM,
14859 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14860 { TG3PCI_SUBVENDOR_ID_3COM,
14861 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
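14863 /* Dell boards. */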
14864 { TG3PCI_SUBVENDOR_ID_DELL,
14865 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14866 { TG3PCI_SUBVENDOR_ID_DELL,
14867 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14868 { TG3PCI_SUBVENDOR_ID_DELL,
14869 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14870 { TG3PCI_SUBVENDOR_ID_DELL,
14871 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14873 /* Compaq boards. */
14874 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14875 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14876 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14877 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14878 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14879 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14880 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14881 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14882 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14883 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
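14885 /* IBM boards. */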
14886 { TG3PCI_SUBVENDOR_ID_IBM,
14887 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14890 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14894 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14895 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14896 tp->pdev->subsystem_vendor) &&
14897 (subsys_id_to_phy_id[i].subsys_devid ==
14898 tp->pdev->subsystem_device))
14899 return &subsys_id_to_phy_id[i];
14900 }
14902 return NULL;
14903 }
14904 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14908 tp->phy_id = TG3_PHY_ID_INVALID;
14909 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14911 /* Assume an onboard device and WOL capable by default. */
14912 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14913 tg3_flag_set(tp, WOL_CAP);
14915 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14916 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14917 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14918 tg3_flag_set(tp, IS_NIC);
14920 val = tr32(VCPU_CFGSHDW);
14921 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14922 tg3_flag_set(tp, ASPM_WORKAROUND);
14923 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14924 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14925 tg3_flag_set(tp, WOL_ENABLE);
14926 device_set_wakeup_enable(&tp->pdev->dev, true);
14931 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14932 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14933 u32 nic_cfg, led_cfg;
14934 u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
14935 u32 nic_phy_id, ver, eeprom_phy_id;
14936 int eeprom_phy_serdes = 0;
14938 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14939 tp->nic_sram_data_cfg = nic_cfg;
14941 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14942 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14943 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14944 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14945 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14946 (ver > 0) && (ver < 0x100))
14947 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14949 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14950 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14952 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14953 tg3_asic_rev(tp) == ASIC_REV_5719 ||
14954 tg3_asic_rev(tp) == ASIC_REV_5720)
14955 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
14957 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14958 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14959 eeprom_phy_serdes = 1;
14961 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14962 if (nic_phy_id != 0) {
14963 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14964 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14966 eeprom_phy_id = (id1 >> 16) << 10;
14967 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14968 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14969 } else
14970 eeprom_phy_id = 0;
14972 tp->phy_id = eeprom_phy_id;
14973 if (eeprom_phy_serdes) {
14974 if (!tg3_flag(tp, 5705_PLUS))
14975 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14977 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14980 if (tg3_flag(tp, 5750_PLUS))
14981 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14982 SHASTA_EXT_LED_MODE_MASK);
14984 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14988 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14989 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14992 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14993 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14996 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14997 tp->led_ctrl = LED_CTRL_MODE_MAC;
14999 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
15000 * read on some older 5700/5701 bootcode.
15002 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15003 tg3_asic_rev(tp) == ASIC_REV_5701)
15004 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15008 case SHASTA_EXT_LED_SHARED:
15009 tp->led_ctrl = LED_CTRL_MODE_SHARED;
15010 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15011 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15012 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15013 LED_CTRL_MODE_PHY_2);
15015 if (tg3_flag(tp, 5717_PLUS) ||
15016 tg3_asic_rev(tp) == ASIC_REV_5762)
15017 tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15018 LED_CTRL_BLINK_RATE_MASK;
15022 case SHASTA_EXT_LED_MAC:
15023 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15026 case SHASTA_EXT_LED_COMBO:
15027 tp->led_ctrl = LED_CTRL_MODE_COMBO;
15028 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15029 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15030 LED_CTRL_MODE_PHY_2);
15035 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15036 tg3_asic_rev(tp) == ASIC_REV_5701) &&
15037 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15038 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15040 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15041 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15043 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15044 tg3_flag_set(tp, EEPROM_WRITE_PROT);
15045 if ((tp->pdev->subsystem_vendor ==
15046 PCI_VENDOR_ID_ARIMA) &&
15047 (tp->pdev->subsystem_device == 0x205a ||
15048 tp->pdev->subsystem_device == 0x2063))
15049 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15051 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15052 tg3_flag_set(tp, IS_NIC);
15055 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15056 tg3_flag_set(tp, ENABLE_ASF);
15057 if (tg3_flag(tp, 5750_PLUS))
15058 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15061 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15062 tg3_flag(tp, 5750_PLUS))
15063 tg3_flag_set(tp, ENABLE_APE);
15065 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15066 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15067 tg3_flag_clear(tp, WOL_CAP);
15069 if (tg3_flag(tp, WOL_CAP) &&
15070 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15071 tg3_flag_set(tp, WOL_ENABLE);
15072 device_set_wakeup_enable(&tp->pdev->dev, true);
15075 if (cfg2 & (1 << 17))
15076 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15078 /* Serdes signal pre-emphasis in register 0x590 is set by
15079 * the bootcode if bit 18 is set. */
15080 if (cfg2 & (1 << 18))
15081 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15083 if ((tg3_flag(tp, 57765_PLUS) ||
15084 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15085 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15086 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15087 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15089 if (tg3_flag(tp, PCI_EXPRESS)) {
15092 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15093 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15094 !tg3_flag(tp, 57765_PLUS) &&
15095 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15096 tg3_flag_set(tp, ASPM_WORKAROUND);
15097 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15098 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15099 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15100 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15103 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15104 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15105 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15106 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15107 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15108 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15110 if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15111 tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15114 if (tg3_flag(tp, WOL_CAP))
15115 device_set_wakeup_enable(&tp->pdev->dev,
15116 tg3_flag(tp, WOL_ENABLE));
15118 device_set_wakeup_capable(&tp->pdev->dev, false);
15121 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15124 u32 val2, off = offset * 8;
15126 err = tg3_nvram_lock(tp);
15130 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15131 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15132 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15133 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15136 for (i = 0; i < 100; i++) {
15137 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15138 if (val2 & APE_OTP_STATUS_CMD_DONE) {
15139 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15145 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15147 tg3_nvram_unlock(tp);
15148 if (val2 & APE_OTP_STATUS_CMD_DONE)
15154 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15159 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15160 tw32(OTP_CTRL, cmd);
15162 /* Wait for up to 1 ms for command to execute. */
15163 for (i = 0; i < 100; i++) {
15164 val = tr32(OTP_STATUS);
15165 if (val & OTP_STATUS_CMD_DONE)
15166 break;
15167 udelay(10);
15168 }
15170 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15173 /* Read the gphy configuration from the OTP region of the chip. The gphy
15174 * configuration is a 32-bit value that straddles the alignment boundary.
15175 * We do two 32-bit reads and then shift and merge the results.
15177 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15179 u32 bhalf_otp, thalf_otp;
15181 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15183 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15186 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15188 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15191 thalf_otp = tr32(OTP_READ_DATA);
15193 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15195 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15198 bhalf_otp = tr32(OTP_READ_DATA);
15200 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
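/* Worked example (illustrative only): if the 32-bit gphy config is
 * 0x11223344, the first OTP word (thalf_otp) carries 0x1122 in its low half
 * and the second (bhalf_otp) carries 0x3344 in its high half, so
 * ((thalf_otp & 0xffff) << 16) | (bhalf_otp >> 16) reassembles 0x11223344.
 */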
15203 static void tg3_phy_init_link_config(struct tg3 *tp)
15205 u32 adv = ADVERTISED_Autoneg;
15207 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15208 if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15209 adv |= ADVERTISED_1000baseT_Half;
15210 adv |= ADVERTISED_1000baseT_Full;
15213 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15214 adv |= ADVERTISED_100baseT_Half |
15215 ADVERTISED_100baseT_Full |
15216 ADVERTISED_10baseT_Half |
15217 ADVERTISED_10baseT_Full |
15218 ADVERTISED_TP;
15219 else
15220 adv |= ADVERTISED_FIBRE;
15222 tp->link_config.advertising = adv;
15223 tp->link_config.speed = SPEED_UNKNOWN;
15224 tp->link_config.duplex = DUPLEX_UNKNOWN;
15225 tp->link_config.autoneg = AUTONEG_ENABLE;
15226 tp->link_config.active_speed = SPEED_UNKNOWN;
15227 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15232 static int tg3_phy_probe(struct tg3 *tp)
15234 u32 hw_phy_id_1, hw_phy_id_2;
15235 u32 hw_phy_id, hw_phy_id_masked;
15238 /* flow control autonegotiation is default behavior */
15239 tg3_flag_set(tp, PAUSE_AUTONEG);
15240 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15242 if (tg3_flag(tp, ENABLE_APE)) {
15243 switch (tp->pci_fn) {
15245 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15248 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15251 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15254 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15259 if (!tg3_flag(tp, ENABLE_ASF) &&
15260 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15261 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15262 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15263 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15265 if (tg3_flag(tp, USE_PHYLIB))
15266 return tg3_phy_init(tp);
15268 /* Reading the PHY ID register can conflict with ASF
15269 * firmware access to the PHY hardware.
15272 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15273 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15275 /* Now read the physical PHY_ID from the chip and verify
15276 * that it is sane. If it doesn't look good, we fall back to
15277 * the hard-coded table based PHY_ID, and failing that, to
15278 * the value found in the eeprom area.
15279 */
15280 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15281 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15283 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15284 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15285 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15287 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
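/* Note (annotation, hedged): the shifts above pack the two MII PHYSID
 * registers into the driver's internal PHY id format, the same packing
 * applied to the NVRAM-supplied id in tg3_get_eeprom_hw_cfg().  Masking with
 * TG3_PHY_ID_MASK then drops the low-order revision bits so the result can
 * be compared against the TG3_PHY_ID_* constants.
 */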
15290 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15291 tp->phy_id = hw_phy_id;
15292 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15293 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15295 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15297 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15298 /* Do nothing, phy ID already set up in
15299 * tg3_get_eeprom_hw_cfg().
15302 struct subsys_tbl_ent *p;
15304 /* No eeprom signature? Try the hardcoded
15305 * subsys device table.
15307 p = tg3_lookup_by_subsys(tp);
15309 tp->phy_id = p->phy_id;
15310 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15311 /* For now we saw the IDs 0xbc050cd0,
15312 * 0xbc050f80 and 0xbc050c30 on devices
15313 * connected to a BCM4785 and there are
15314 * probably more. Just assume that the phy is
15315 * supported when it is connected to an SSB core
15316 * for now.
15317 */
15318 return -ENODEV;
15322 tp->phy_id == TG3_PHY_ID_BCM8002)
15323 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15327 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15328 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15329 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15330 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15331 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15332 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15333 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15334 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15335 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15336 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15338 tp->eee.supported = SUPPORTED_100baseT_Full |
15339 SUPPORTED_1000baseT_Full;
15340 tp->eee.advertised = ADVERTISED_100baseT_Full |
15341 ADVERTISED_1000baseT_Full;
15342 tp->eee.eee_enabled = 1;
15343 tp->eee.tx_lpi_enabled = 1;
15344 tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15347 tg3_phy_init_link_config(tp);
15349 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15350 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15351 !tg3_flag(tp, ENABLE_APE) &&
15352 !tg3_flag(tp, ENABLE_ASF)) {
15355 tg3_readphy(tp, MII_BMSR, &bmsr);
15356 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15357 (bmsr & BMSR_LSTATUS))
15358 goto skip_phy_reset;
15360 err = tg3_phy_reset(tp);
15364 tg3_phy_set_wirespeed(tp);
15366 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15367 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15368 tp->link_config.flowctrl);
15370 tg3_writephy(tp, MII_BMCR,
15371 BMCR_ANENABLE | BMCR_ANRESTART);
15375 skip_phy_reset:
15376 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15377 err = tg3_init_5401phy_dsp(tp);
15381 err = tg3_init_5401phy_dsp(tp);
15387 static void tg3_read_vpd(struct tg3 *tp)
15390 unsigned int block_end, rosize, len;
15394 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15398 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15400 goto out_not_found;
15402 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15403 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15404 i += PCI_VPD_LRDT_TAG_SIZE;
15406 if (block_end > vpdlen)
15407 goto out_not_found;
15409 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15410 PCI_VPD_RO_KEYWORD_MFR_ID);
15412 len = pci_vpd_info_field_size(&vpd_data[j]);
15414 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15415 if (j + len > block_end || len != 4 ||
15416 memcmp(&vpd_data[j], "1028", 4))
15419 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15420 PCI_VPD_RO_KEYWORD_VENDOR0);
15424 len = pci_vpd_info_field_size(&vpd_data[j]);
15426 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15427 if (j + len > block_end)
15430 if (len >= sizeof(tp->fw_ver))
15431 len = sizeof(tp->fw_ver) - 1;
15432 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15433 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15438 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15439 PCI_VPD_RO_KEYWORD_PARTNO);
15441 goto out_not_found;
15443 len = pci_vpd_info_field_size(&vpd_data[i]);
15445 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15446 if (len > TG3_BPN_SIZE ||
15447 (len + i) > vpdlen)
15448 goto out_not_found;
15450 memcpy(tp->board_part_number, &vpd_data[i], len);
15454 if (tp->board_part_number[0])
15458 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15459 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15460 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15461 strcpy(tp->board_part_number, "BCM5717");
15462 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15463 strcpy(tp->board_part_number, "BCM5718");
15466 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15467 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15468 strcpy(tp->board_part_number, "BCM57780");
15469 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15470 strcpy(tp->board_part_number, "BCM57760");
15471 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15472 strcpy(tp->board_part_number, "BCM57790");
15473 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15474 strcpy(tp->board_part_number, "BCM57788");
15477 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15478 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15479 strcpy(tp->board_part_number, "BCM57761");
15480 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15481 strcpy(tp->board_part_number, "BCM57765");
15482 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15483 strcpy(tp->board_part_number, "BCM57781");
15484 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15485 strcpy(tp->board_part_number, "BCM57785");
15486 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15487 strcpy(tp->board_part_number, "BCM57791");
15488 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15489 strcpy(tp->board_part_number, "BCM57795");
15492 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15493 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15494 strcpy(tp->board_part_number, "BCM57762");
15495 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15496 strcpy(tp->board_part_number, "BCM57766");
15497 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15498 strcpy(tp->board_part_number, "BCM57782");
15499 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15500 strcpy(tp->board_part_number, "BCM57786");
15503 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15504 strcpy(tp->board_part_number, "BCM95906");
15507 strcpy(tp->board_part_number, "none");
15511 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15515 if (tg3_nvram_read(tp, offset, &val) ||
15516 (val & 0xfc000000) != 0x0c000000 ||
15517 tg3_nvram_read(tp, offset + 4, &val) ||
15524 static void tg3_read_bc_ver(struct tg3 *tp)
15526 u32 val, offset, start, ver_offset;
15528 bool newver = false;
15530 if (tg3_nvram_read(tp, 0xc, &offset) ||
15531 tg3_nvram_read(tp, 0x4, &start))
15534 offset = tg3_nvram_logical_addr(tp, offset);
15536 if (tg3_nvram_read(tp, offset, &val))
15539 if ((val & 0xfc000000) == 0x0c000000) {
15540 if (tg3_nvram_read(tp, offset + 4, &val))
15547 dst_off = strlen(tp->fw_ver);
15550 if (TG3_VER_SIZE - dst_off < 16 ||
15551 tg3_nvram_read(tp, offset + 8, &ver_offset))
15554 offset = offset + ver_offset - start;
15555 for (i = 0; i < 16; i += 4) {
15557 if (tg3_nvram_read_be32(tp, offset + i, &v))
15560 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15565 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15568 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15569 TG3_NVM_BCVER_MAJSFT;
15570 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15571 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15572 "v%d.%02d", major, minor);
15576 static void tg3_read_hwsb_ver(struct tg3 *tp)
15578 u32 val, major, minor;
15580 /* Use native endian representation */
15581 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15584 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15585 TG3_NVM_HWSB_CFG1_MAJSFT;
15586 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15587 TG3_NVM_HWSB_CFG1_MINSFT;
15589 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15592 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15594 u32 offset, major, minor, build;
15596 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15598 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15601 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15602 case TG3_EEPROM_SB_REVISION_0:
15603 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15605 case TG3_EEPROM_SB_REVISION_2:
15606 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15608 case TG3_EEPROM_SB_REVISION_3:
15609 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15611 case TG3_EEPROM_SB_REVISION_4:
15612 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15614 case TG3_EEPROM_SB_REVISION_5:
15615 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15617 case TG3_EEPROM_SB_REVISION_6:
15618 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15624 if (tg3_nvram_read(tp, offset, &val))
15627 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15628 TG3_EEPROM_SB_EDH_BLD_SHFT;
15629 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15630 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15631 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15633 if (minor > 99 || build > 26)
15636 offset = strlen(tp->fw_ver);
15637 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15638 " v%d.%02d", major, minor);
15641 offset = strlen(tp->fw_ver);
15642 if (offset < TG3_VER_SIZE - 1)
15643 tp->fw_ver[offset] = 'a' + build - 1;
15647 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15649 u32 val, offset, start;
15652 for (offset = TG3_NVM_DIR_START;
15653 offset < TG3_NVM_DIR_END;
15654 offset += TG3_NVM_DIRENT_SIZE) {
15655 if (tg3_nvram_read(tp, offset, &val))
15658 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15662 if (offset == TG3_NVM_DIR_END)
15665 if (!tg3_flag(tp, 5705_PLUS))
15666 start = 0x08000000;
15667 else if (tg3_nvram_read(tp, offset - 4, &start))
15670 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15671 !tg3_fw_img_is_valid(tp, offset) ||
15672 tg3_nvram_read(tp, offset + 8, &val))
15675 offset += val - start;
15677 vlen = strlen(tp->fw_ver);
15679 tp->fw_ver[vlen++] = ',';
15680 tp->fw_ver[vlen++] = ' ';
15682 for (i = 0; i < 4; i++) {
15684 if (tg3_nvram_read_be32(tp, offset, &v))
15687 offset += sizeof(v);
15689 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15690 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15694 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15699 static void tg3_probe_ncsi(struct tg3 *tp)
15703 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15704 if (apedata != APE_SEG_SIG_MAGIC)
15707 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15708 if (!(apedata & APE_FW_STATUS_READY))
15711 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15712 tg3_flag_set(tp, APE_HAS_NCSI);
15715 static void tg3_read_dash_ver(struct tg3 *tp)
15721 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15723 if (tg3_flag(tp, APE_HAS_NCSI))
15725 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15730 vlen = strlen(tp->fw_ver);
15732 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15734 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15735 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15736 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15737 (apedata & APE_FW_VERSION_BLDMSK));
15740 static void tg3_read_otp_ver(struct tg3 *tp)
15744 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15747 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15748 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15749 TG3_OTP_MAGIC0_VALID(val)) {
15750 u64 val64 = (u64) val << 32 | val2;
15754 for (i = 0; i < 7; i++) {
15755 if ((val64 & 0xff) == 0)
15757 ver = val64 & 0xff;
15760 vlen = strlen(tp->fw_ver);
15761 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15765 static void tg3_read_fw_ver(struct tg3 *tp)
15768 bool vpd_vers = false;
15770 if (tp->fw_ver[0] != 0)
15773 if (tg3_flag(tp, NO_NVRAM)) {
15774 strcat(tp->fw_ver, "sb");
15775 tg3_read_otp_ver(tp);
15779 if (tg3_nvram_read(tp, 0, &val))
15782 if (val == TG3_EEPROM_MAGIC)
15783 tg3_read_bc_ver(tp);
15784 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15785 tg3_read_sb_ver(tp, val);
15786 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15787 tg3_read_hwsb_ver(tp);
15789 if (tg3_flag(tp, ENABLE_ASF)) {
15790 if (tg3_flag(tp, ENABLE_APE)) {
15791 tg3_probe_ncsi(tp);
15793 tg3_read_dash_ver(tp);
15794 } else if (!vpd_vers) {
15795 tg3_read_mgmtfw_ver(tp);
15799 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15802 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15804 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15805 return TG3_RX_RET_MAX_SIZE_5717;
15806 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15807 return TG3_RX_RET_MAX_SIZE_5700;
15809 return TG3_RX_RET_MAX_SIZE_5705;
15812 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15813 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15814 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15815 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15819 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15821 struct pci_dev *peer;
15822 unsigned int func, devnr = tp->pdev->devfn & ~7;
15824 for (func = 0; func < 8; func++) {
15825 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15826 if (peer && peer != tp->pdev)
15830 /* 5704 can be configured in single-port mode, set peer to
15831 * tp->pdev in that case.
15839 * We don't need to keep the refcount elevated; there's no way
15840 * to remove one half of this device without removing the other
15847 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15849 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15850 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15853 /* All devices that use the alternate
15854 * ASIC REV location have a CPMU.
15856 tg3_flag_set(tp, CPMU_PRESENT);
15858 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15859 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15860 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15861 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15862 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15863 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
15864 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
15865 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15866 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15867 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
15868 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
15869 reg = TG3PCI_GEN2_PRODID_ASICREV;
15870 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15871 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15872 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15873 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15874 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15875 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15876 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15877 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15878 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15879 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15880 reg = TG3PCI_GEN15_PRODID_ASICREV;
15882 reg = TG3PCI_PRODID_ASICREV;
15884 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15887 /* Wrong chip ID in 5752 A0. This code can be removed later
15888 * as A0 is not in production.
15890 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15891 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15893 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15894 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15896 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15897 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15898 tg3_asic_rev(tp) == ASIC_REV_5720)
15899 tg3_flag_set(tp, 5717_PLUS);
15901 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15902 tg3_asic_rev(tp) == ASIC_REV_57766)
15903 tg3_flag_set(tp, 57765_CLASS);
15905 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15906 tg3_asic_rev(tp) == ASIC_REV_5762)
15907 tg3_flag_set(tp, 57765_PLUS);
15909 /* Intentionally exclude ASIC_REV_5906 */
15910 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15911 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15912 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15913 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15914 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15915 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15916 tg3_flag(tp, 57765_PLUS))
15917 tg3_flag_set(tp, 5755_PLUS);
15919 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15920 tg3_asic_rev(tp) == ASIC_REV_5714)
15921 tg3_flag_set(tp, 5780_CLASS);
15923 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15924 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15925 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15926 tg3_flag(tp, 5755_PLUS) ||
15927 tg3_flag(tp, 5780_CLASS))
15928 tg3_flag_set(tp, 5750_PLUS);
15930 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15931 tg3_flag(tp, 5750_PLUS))
15932 tg3_flag_set(tp, 5705_PLUS);
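/* Note (annotation): the generation flags derived above nest, newest to
 * oldest:
 *   5717_PLUS -> 57765_PLUS -> 5755_PLUS -> 5750_PLUS -> 5705_PLUS
 * A chip carrying one flag also carries every flag to its right, so feature
 * tests can key off the oldest generation that introduced the feature.
 */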
15935 static bool tg3_10_100_only_device(struct tg3 *tp,
15936 const struct pci_device_id *ent)
15938 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15940 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15941 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15942 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15945 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15946 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15947 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15957 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15960 u32 pci_state_reg, grc_misc_cfg;
15965 /* Force memory write invalidate off. If we leave it on,
15966 * then on 5700_BX chips we have to enable a workaround.
15967 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15968 * to match the cacheline size. The Broadcom driver has this
15969 * workaround but turns MWI off all the time, so it never uses
15970 * it. This seems to suggest that the workaround is insufficient.
15972 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15973 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15974 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15976 /* Important! -- Make sure register accesses are byteswapped
15977 * correctly. Also, for those chips that require it, make
15978 * sure that indirect register accesses are enabled before
15979 * the first operation.
15981 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15983 tp->misc_host_ctrl |= (misc_ctrl_reg &
15984 MISC_HOST_CTRL_CHIPREV);
15985 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15986 tp->misc_host_ctrl);
15988 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15990 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15991 * we need to disable memory and use config. cycles
15992 * only to access all registers. The 5702/03 chips
15993 * can mistakenly decode the special cycles from the
15994 * ICH chipsets as memory write cycles, causing corruption
15995 * of register and memory space. Only certain ICH bridges
15996 * will drive special cycles with non-zero data during the
15997 * address phase which can fall within the 5703's address
15998 * range. This is not an ICH bug as the PCI spec allows
15999 * non-zero address during special cycles. However, only
16000 * these ICH bridges are known to drive non-zero addresses
16001 * during special cycles.
16003 * Since special cycles do not cross PCI bridges, we only
16004 * enable this workaround if the 5703 is on the secondary
16005 * bus of these ICH bridges.
16007 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16008 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16009 static struct tg3_dev_id {
16013 } ich_chipsets[] = {
16014 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16016 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16018 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16020 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16024 struct tg3_dev_id *pci_id = &ich_chipsets[0];
16025 struct pci_dev *bridge = NULL;
16027 while (pci_id->vendor != 0) {
16028 bridge = pci_get_device(pci_id->vendor, pci_id->device,
16034 if (pci_id->rev != PCI_ANY_ID) {
16035 if (bridge->revision > pci_id->rev)
16038 if (bridge->subordinate &&
16039 (bridge->subordinate->number ==
16040 tp->pdev->bus->number)) {
16041 tg3_flag_set(tp, ICH_WORKAROUND);
16042 pci_dev_put(bridge);
16048 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16049 static struct tg3_dev_id {
16052 } bridge_chipsets[] = {
16053 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16054 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16057 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16058 struct pci_dev *bridge = NULL;
16060 while (pci_id->vendor != 0) {
16061 bridge = pci_get_device(pci_id->vendor,
16068 if (bridge->subordinate &&
16069 (bridge->subordinate->number <=
16070 tp->pdev->bus->number) &&
16071 (bridge->subordinate->busn_res.end >=
16072 tp->pdev->bus->number)) {
16073 tg3_flag_set(tp, 5701_DMA_BUG);
16074 pci_dev_put(bridge);
16080 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
16081 * DMA addresses > 40-bit. This bridge may have additional
16082 * 57xx devices behind it in some 4-port NIC designs for example.
16083 * Any tg3 device found behind the bridge will also need the 40-bit
16084 * DMA workaround.
16085 */
16086 if (tg3_flag(tp, 5780_CLASS)) {
16087 tg3_flag_set(tp, 40BIT_DMA_BUG);
16088 tp->msi_cap = tp->pdev->msi_cap;
16090 struct pci_dev *bridge = NULL;
16093 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16094 PCI_DEVICE_ID_SERVERWORKS_EPB,
16096 if (bridge && bridge->subordinate &&
16097 (bridge->subordinate->number <=
16098 tp->pdev->bus->number) &&
16099 (bridge->subordinate->busn_res.end >=
16100 tp->pdev->bus->number)) {
16101 tg3_flag_set(tp, 40BIT_DMA_BUG);
16102 pci_dev_put(bridge);
16108 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16109 tg3_asic_rev(tp) == ASIC_REV_5714)
16110 tp->pdev_peer = tg3_find_peer(tp);
16112 /* Determine TSO capabilities */
16113 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16114 ; /* Do nothing. HW bug. */
16115 else if (tg3_flag(tp, 57765_PLUS))
16116 tg3_flag_set(tp, HW_TSO_3);
16117 else if (tg3_flag(tp, 5755_PLUS) ||
16118 tg3_asic_rev(tp) == ASIC_REV_5906)
16119 tg3_flag_set(tp, HW_TSO_2);
16120 else if (tg3_flag(tp, 5750_PLUS)) {
16121 tg3_flag_set(tp, HW_TSO_1);
16122 tg3_flag_set(tp, TSO_BUG);
16123 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16124 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16125 tg3_flag_clear(tp, TSO_BUG);
16126 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16127 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16128 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16129 tg3_flag_set(tp, FW_TSO);
16130 tg3_flag_set(tp, TSO_BUG);
16131 if (tg3_asic_rev(tp) == ASIC_REV_5705)
16132 tp->fw_needed = FIRMWARE_TG3TSO5;
16134 tp->fw_needed = FIRMWARE_TG3TSO;
16137 /* Selectively allow TSO based on operating conditions */
16138 if (tg3_flag(tp, HW_TSO_1) ||
16139 tg3_flag(tp, HW_TSO_2) ||
16140 tg3_flag(tp, HW_TSO_3) ||
16141 tg3_flag(tp, FW_TSO)) {
16142 /* For firmware TSO, assume ASF is disabled.
16143 * We'll disable TSO later if we discover ASF
16144 * is enabled in tg3_get_eeprom_hw_cfg().
		 */
		tg3_flag_set(tp, TSO_CAPABLE);
	} else {
		tg3_flag_clear(tp, TSO_CAPABLE);
		tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}
16153 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16154 tp->fw_needed = FIRMWARE_TG3;
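	/* Sketch (hedged) of what happens with tp->fw_needed later: the
	 * open path hands it to the firmware loader, roughly
	 *
	 *	request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev);
	 *
	 * and bails out of the open if the blob cannot be fetched.
	 */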
16156 if (tg3_asic_rev(tp) == ASIC_REV_57766)
		tp->fw_needed = FIRMWARE_TG357766;

	tp->irq_max = 1;
16161 if (tg3_flag(tp, 5750_PLUS)) {
16162 tg3_flag_set(tp, SUPPORT_MSI);
16163 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16164 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16165 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16166 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16167 tp->pdev_peer == tp->pdev))
16168 tg3_flag_clear(tp, SUPPORT_MSI);
16170 if (tg3_flag(tp, 5755_PLUS) ||
16171 tg3_asic_rev(tp) == ASIC_REV_5906) {
			tg3_flag_set(tp, 1SHOT_MSI);
		}
	}
16175 if (tg3_flag(tp, 57765_PLUS)) {
16176 tg3_flag_set(tp, SUPPORT_MSIX);
		tp->irq_max = TG3_IRQ_MAX_VECS;
	}

	tp->txq_max = 1;
	tp->rxq_max = 1;
16183 if (tp->irq_max > 1) {
16184 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16185 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16187 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16188 tg3_asic_rev(tp) == ASIC_REV_5720)
			tp->txq_max = tp->irq_max - 1;
	}
16192 if (tg3_flag(tp, 5755_PLUS) ||
16193 tg3_asic_rev(tp) == ASIC_REV_5906)
16194 tg3_flag_set(tp, SHORT_DMA_BUG);
16196 if (tg3_asic_rev(tp) == ASIC_REV_5719)
16197 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16199 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16200 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16201 tg3_asic_rev(tp) == ASIC_REV_5720 ||
16202 tg3_asic_rev(tp) == ASIC_REV_5762)
16203 tg3_flag_set(tp, LRG_PROD_RING_CAP);
16205 if (tg3_flag(tp, 57765_PLUS) &&
16206 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16207 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16209 if (!tg3_flag(tp, 5705_PLUS) ||
16210 tg3_flag(tp, 5780_CLASS) ||
16211 tg3_flag(tp, USE_JUMBO_BDFLAG))
16212 tg3_flag_set(tp, JUMBO_CAPABLE);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
	if (pci_is_pcie(tp->pdev)) {
		u16 lnkctl;

		tg3_flag_set(tp, PCI_EXPRESS);

		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16223 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16224 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16225 tg3_flag_clear(tp, HW_TSO_2);
				tg3_flag_clear(tp, TSO_CAPABLE);
			}
16228 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16229 tg3_asic_rev(tp) == ASIC_REV_5761 ||
16230 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16231 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16232 tg3_flag_set(tp, CLKREQ_BUG);
16233 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
			tg3_flag_set(tp, L1PLLPD_EN);
		}
16236 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16237 /* BCM5785 devices are effectively PCIe devices, and should
		 * follow PCIe codepaths, but do not have a PCIe capabilities
		 * section.
		 */
16241 tg3_flag_set(tp, PCI_EXPRESS);
16242 } else if (!tg3_flag(tp, 5705_PLUS) ||
16243 tg3_flag(tp, 5780_CLASS)) {
16244 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16245 if (!tp->pcix_cap) {
16246 dev_err(&tp->pdev->dev,
16247 "Cannot find PCI-X capability, aborting\n");
16251 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
			tg3_flag_set(tp, PCIX_MODE);
	}
16255 /* If we have an AMD 762 or VIA K8T800 chipset, write
16256 * reordering to the mailbox registers done by the host
16257 * controller can cause major troubles. We read back from
16258 * every mailbox register write to force the writes to be
	 * posted to the chip in order.
	 */
16261 if (pci_dev_present(tg3_write_reorder_chipsets) &&
16262 !tg3_flag(tp, PCI_EXPRESS))
16263 tg3_flag_set(tp, MBOX_WRITE_REORDER);
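	/* Sketch (hedged) of the flushing accessor this flag selects below:
	 *
	 *	writel(val, mbox);
	 *	readl(mbox);
	 *
	 * The read back forces each posted write to complete before the
	 * next one, defeating the bridge's write reordering.
	 */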
16265 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16266 &tp->pci_cacheline_sz);
16267 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16268 &tp->pci_lat_timer);
16269 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16270 tp->pci_lat_timer < 64) {
16271 tp->pci_lat_timer = 64;
16272 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
				      tp->pci_lat_timer);
	}
16276 /* Important! -- It is critical that the PCI-X hw workaround
	 * situation is decided before the first MMIO register access.
	 */
16279 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16280 /* 5700 BX chips need to have their TX producer index
		 * mailboxes written twice to workaround a bug.
		 */
16283 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16285 /* If we are in PCI-X mode, enable register write workaround.
16287 * The workaround is to use indirect register accesses
		 * for all chip writes not to mailbox registers.
		 */
		if (tg3_flag(tp, PCIX_MODE)) {
			u32 pm_reg;

16293 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
			/* The chip can have its power management PCI config
			 * space registers clobbered due to this bug.
			 * So explicitly force the chip into D0 here.
			 */
16299 pci_read_config_dword(tp->pdev,
					      tp->pdev->pm_cap + PCI_PM_CTRL,
					      &pm_reg);
16302 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16303 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
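			/* Per the PCI PM spec, the low bits covered by
			 * PCI_PM_CTRL_STATE_MASK encode the power state;
			 * 0 selects D0 (fully powered).
			 */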
16304 pci_write_config_dword(tp->pdev,
					       tp->pdev->pm_cap + PCI_PM_CTRL,
					       pm_reg);
16308 /* Also, force SERR#/PERR# in PCI command. */
16309 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16310 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
		}
	}
16315 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16316 tg3_flag_set(tp, PCI_HIGH_SPEED);
16317 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16318 tg3_flag_set(tp, PCI_32BIT);
16320 /* Chip-specific fixup from Broadcom driver */
16321 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16322 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16323 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);
	}
16327 /* Default fast path register access methods */
16328 tp->read32 = tg3_read32;
16329 tp->write32 = tg3_write32;
16330 tp->read32_mbox = tg3_read32;
16331 tp->write32_mbox = tg3_write32;
16332 tp->write32_tx_mbox = tg3_write32;
16333 tp->write32_rx_mbox = tg3_write32;
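	/* For reference, a sketch (hedged) of the default fast-path accessor
	 * pair; the real helpers are defined earlier in this file:
	 *
	 *	static u32 tg3_read32(struct tg3 *tp, u32 off)
	 *	{
	 *		return readl(tp->regs + off);
	 *	}
	 *	static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
	 *	{
	 *		writel(val, tp->regs + off);
	 *	}
	 */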
16335 /* Various workaround register access methods */
16336 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16337 tp->write32 = tg3_write_indirect_reg32;
16338 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16339 (tg3_flag(tp, PCI_EXPRESS) &&
16340 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
		/*
		 * Back to back register writes can cause problems on these
16343 * chips, the workaround is to read back all reg writes
16344 * except those to mailbox regs.
		 * See tg3_write_indirect_reg32().
		 */
		tp->write32 = tg3_write_flush_reg32;
	}
16351 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16352 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16353 if (tg3_flag(tp, MBOX_WRITE_REORDER))
			tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
16357 if (tg3_flag(tp, ICH_WORKAROUND)) {
16358 tp->read32 = tg3_read_indirect_reg32;
16359 tp->write32 = tg3_write_indirect_reg32;
16360 tp->read32_mbox = tg3_read_indirect_mbox;
16361 tp->write32_mbox = tg3_write_indirect_mbox;
16362 tp->write32_tx_mbox = tg3_write_indirect_mbox;
		tp->write32_rx_mbox = tg3_write_indirect_mbox;

		iounmap(tp->regs);
		tp->regs = NULL;

		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
		pci_cmd &= ~PCI_COMMAND_MEMORY;
		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
	}
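	/* Sketch (hedged) of the indirect register path selected above:
	 * accesses go through the PCI config-space window instead of MMIO,
	 * roughly
	 *
	 *	spin_lock_irqsave(&tp->indirect_lock, flags);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	 */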
16372 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16373 tp->read32_mbox = tg3_read32_mbox_5906;
16374 tp->write32_mbox = tg3_write32_mbox_5906;
16375 tp->write32_tx_mbox = tg3_write32_mbox_5906;
		tp->write32_rx_mbox = tg3_write32_mbox_5906;
	}
16379 if (tp->write32 == tg3_write_indirect_reg32 ||
16380 (tg3_flag(tp, PCIX_MODE) &&
16381 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16382 tg3_asic_rev(tp) == ASIC_REV_5701)))
16383 tg3_flag_set(tp, SRAM_USE_CONFIG);
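	/* SRAM_USE_CONFIG routes tg3_read_mem()/tg3_write_mem() through the
	 * TG3PCI_MEM_WIN_BASE_ADDR/TG3PCI_MEM_WIN_DATA config-space window,
	 * since MMIO cannot be trusted on these configurations (hedged
	 * summary of helpers defined earlier in this file).
	 */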
16385 /* The memory arbiter has to be enabled in order for SRAM accesses
16386 * to succeed. Normally on powerup the tg3 chip firmware will make
16387 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
16390 val = tr32(MEMARB_MODE);
16391 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16393 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16394 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16395 tg3_flag(tp, 5780_CLASS)) {
16396 if (tg3_flag(tp, PCIX_MODE)) {
16397 pci_read_config_dword(tp->pdev,
					      tp->pcix_cap + PCI_X_STATUS,
					      &val);
			tp->pci_fn = val & 0x7;
		}
16402 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16403 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16404 tg3_asic_rev(tp) == ASIC_REV_5720) {
16405 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16406 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16407 val = tr32(TG3_CPMU_STATUS);
16409 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16410 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
		else
			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
				     TG3_CPMU_STATUS_FSHFT_5719;
	}
16416 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16417 tp->write32_tx_mbox = tg3_write_flush_reg32;
		tp->write32_rx_mbox = tg3_write_flush_reg32;
	}
16421 /* Get eeprom hw config before calling tg3_set_power_state().
16422 * In particular, the TG3_FLAG_IS_NIC flag must be
16423 * determined before calling tg3_set_power_state() so that
16424 * we know whether or not to switch out of Vaux power.
16425 * When the flag is set, it means that GPIO1 is used for eeprom
16426 * write protect and also implies that it is a LOM where GPIOs
	 * are not used to switch power.
	 */
16429 tg3_get_eeprom_hw_cfg(tp);
16431 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16432 tg3_flag_clear(tp, TSO_CAPABLE);
16433 tg3_flag_clear(tp, TSO_BUG);
		tp->fw_needed = NULL;
	}
16437 if (tg3_flag(tp, ENABLE_APE)) {
16438 /* Allow reads and writes to the
		 * APE register and memory space.
		 */
16441 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16442 PCISTATE_ALLOW_APE_SHMEM_WR |
16443 PCISTATE_ALLOW_APE_PSPACE_WR;
		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
				       pci_state_reg);

		tg3_ape_lock_init(tp);
	}
16450 /* Set up tp->grc_local_ctrl before calling
16451 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16452 * will bring 5700's external PHY out of reset.
	 * It is also used as eeprom write protect on LOMs.
	 */
16455 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16456 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16457 tg3_flag(tp, EEPROM_WRITE_PROT))
16458 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16459 GRC_LCLCTRL_GPIO_OUTPUT1);
16460 /* Unused GPIO3 must be driven as output on 5752 because there
	 * are no pull-up resistors on unused GPIO pins.
	 */
16463 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16464 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16466 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16467 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16468 tg3_flag(tp, 57765_CLASS))
16469 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16471 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16472 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16473 /* Turn off the debug UART. */
16474 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16475 if (tg3_flag(tp, IS_NIC))
16476 /* Keep VMain power. */
16477 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					       GRC_LCLCTRL_GPIO_OUTPUT0;
	}
16481 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16482 tp->grc_local_ctrl |=
16483 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16485 /* Switch out of Vaux if it is a NIC */
16486 tg3_pwrsrc_switch_to_vmain(tp);
16488 /* Derive initial jumbo mode from MTU assigned in
	 * ether_setup() via the alloc_etherdev() call
	 */
16491 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16492 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16494 /* Determine WakeOnLan speed to use. */
16495 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16496 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16497 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16498 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
		tg3_flag_clear(tp, WOL_SPEED_100MB);
	} else {
		tg3_flag_set(tp, WOL_SPEED_100MB);
	}
16504 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16505 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16507 /* A few boards don't want Ethernet@WireSpeed phy feature */
16508 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16509 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16510 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16511 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16512 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16513 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16514 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16516 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16517 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16518 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16519 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16520 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16522 if (tg3_flag(tp, 5705_PLUS) &&
16523 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16524 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16525 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16526 !tg3_flag(tp, 57765_PLUS)) {
16527 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16528 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16529 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16530 tg3_asic_rev(tp) == ASIC_REV_5761) {
16531 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16532 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16533 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16534 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16535 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
		} else
			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
	}
16540 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16541 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16542 tp->phy_otp = tg3_read_otp_phycfg(tp);
16543 if (tp->phy_otp == 0)
			tp->phy_otp = TG3_OTP_DEFAULT;
	}
16547 if (tg3_flag(tp, CPMU_PRESENT))
16548 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
	else
		tp->mi_mode = MAC_MI_MODE_BASE;
16552 tp->coalesce_mode = 0;
16553 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16554 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16555 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16557 /* Set these bits to enable statistics workaround. */
16558 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16559 tg3_asic_rev(tp) == ASIC_REV_5762 ||
16560 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16561 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16562 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
	}
16566 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16567 tg3_asic_rev(tp) == ASIC_REV_57780)
16568 tg3_flag_set(tp, USE_PHYLIB);
	err = tg3_mdio_init(tp);
	if (err)
		return err;
16574 /* Initialize data/descriptor byte/word swapping. */
16575 val = tr32(GRC_MODE);
16576 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16577 tg3_asic_rev(tp) == ASIC_REV_5762)
16578 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16579 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16580 GRC_MODE_B2HRX_ENABLE |
16581 GRC_MODE_HTX2B_ENABLE |
16582 GRC_MODE_HOST_STACKUP);
	else
		val &= GRC_MODE_HOST_STACKUP;
16586 tw32(GRC_MODE, val | tp->grc_mode);
16588 tg3_switch_clocks(tp);
16590 /* Clear this out for sanity. */
16591 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16593 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16594 tw32(TG3PCI_REG_BASE_ADDR, 0);
	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
			      &pci_state_reg);
16598 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16599 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16600 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16601 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16602 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16603 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16604 void __iomem *sram_base;
16606 /* Write some dummy words into the SRAM status block
16607 * area, see if it reads back correctly. If the return
16608 * value is bad, force enable the PCIX workaround.
16610 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16612 writel(0x00000000, sram_base);
16613 writel(0x00000000, sram_base + 4);
16614 writel(0xffffffff, sram_base + 4);
16615 if (readl(sram_base) != 0x00000000)
				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
		}
	}
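	/* Rationale for the probe above: if the readl() of offset 0 does not
	 * return the 0x00000000 just written (e.g. the 0xffffffff written at
	 * offset +4 bleeds over), MMIO writes are being corrupted, so the
	 * indirect-access workaround is forced on.
	 */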
16621 tg3_nvram_init(tp);
16623 /* If the device has an NVRAM, no need to load patch firmware */
16624 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16625 !tg3_flag(tp, NO_NVRAM))
16626 tp->fw_needed = NULL;
16628 grc_misc_cfg = tr32(GRC_MISC_CFG);
16629 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16631 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16632 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16633 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16634 tg3_flag_set(tp, IS_5788);
16636 if (!tg3_flag(tp, IS_5788) &&
16637 tg3_asic_rev(tp) != ASIC_REV_5700)
16638 tg3_flag_set(tp, TAGGED_STATUS);
16639 if (tg3_flag(tp, TAGGED_STATUS)) {
16640 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16641 HOSTCC_MODE_CLRTICK_TXBD);
16643 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16644 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
				       tp->misc_host_ctrl);
	}
16648 /* Preserve the APE MAC_MODE bits */
16649 if (tg3_flag(tp, ENABLE_APE))
		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
	else
		tp->mac_mode = 0;
16654 if (tg3_10_100_only_device(tp, ent))
16655 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
	err = tg3_phy_probe(tp);
	if (err) {
		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
		/* ... but do not return immediately ... */
		tg3_mdio_fini(tp);
	}
16665 tg3_read_fw_ver(tp);
16667 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	} else {
		if (tg3_asic_rev(tp) == ASIC_REV_5700)
			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		else
			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
	}
16676 /* 5700 {AX,BX} chips have a broken status block link
16677 * change bit implementation, so we must use the
	 * status register in those cases.
	 */
16680 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16681 tg3_flag_set(tp, USE_LINKCHG_REG);
	else
		tg3_flag_clear(tp, USE_LINKCHG_REG);
16685 /* The led_ctrl is set during tg3_phy_probe, here we might
16686 * have to force the link status polling mechanism based
	 * upon subsystem IDs.
	 */
16689 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16690 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16691 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16692 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
		tg3_flag_set(tp, USE_LINKCHG_REG);
	}
16696 /* For all SERDES we poll the MAC status register. */
16697 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16698 tg3_flag_set(tp, POLL_SERDES);
	else
		tg3_flag_clear(tp, POLL_SERDES);
16702 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16703 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16704 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16705 tg3_flag(tp, PCIX_MODE)) {
16706 tp->rx_offset = NET_SKB_PAD;
16707 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
		tp->rx_copy_thresh = ~(u16)0;
#endif
	}
16712 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16713 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16714 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16716 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
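	/* The ring sizes are powers of two, so the masks above let ring
	 * indices wrap with a cheap "idx & mask" instead of a modulo
	 * (hedged summary of how the masks are used elsewhere).
	 */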
16718 /* Increment the rx prod index on the rx std ring by at most
	 * 8 for these chips to workaround hw errata.
	 */
16721 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16722 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16723 tg3_asic_rev(tp) == ASIC_REV_5755)
16724 tp->rx_std_max_post = 8;
16726 if (tg3_flag(tp, ASPM_WORKAROUND))
16727 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
				     PCIE_PWR_MGMT_L1_THRESH_MSK;

	return err;
}
16733 #ifdef CONFIG_SPARC
static int tg3_get_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	struct pci_dev *pdev = tp->pdev;
	struct device_node *dp = pci_device_to_OF_node(pdev);
	const unsigned char *addr;
	int len;

	addr = of_get_property(dp, "local-mac-address", &len);
	if (addr && len == ETH_ALEN) {
		memcpy(dev->dev_addr, addr, ETH_ALEN);
		return 0;
	}
	return -ENODEV;
}
static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
	return 0;
}
#endif
static int tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;
	int err;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif
16771 if (tg3_flag(tp, IS_SSB_CORE)) {
16772 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
			return 0;
	}

	mac_offset = 0x7c;
16778 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16779 tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (tp->pci_fn & 1)
			mac_offset = 0xcc;
		if (tp->pci_fn > 1)
			mac_offset += 0x18c;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
		mac_offset = 0x10;
16794 /* First try to get it from MAC address mailbox. */
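	/* 0x484b in the upper half is ASCII "HK", apparently the bootcode's
	 * signature for "a valid address follows".
	 */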
16795 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16796 if ((hi >> 16) == 0x484b) {
16797 dev->dev_addr[0] = (hi >> 8) & 0xff;
16798 dev->dev_addr[1] = (hi >> 0) & 0xff;
16800 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16801 dev->dev_addr[2] = (lo >> 24) & 0xff;
16802 dev->dev_addr[3] = (lo >> 16) & 0xff;
16803 dev->dev_addr[4] = (lo >> 8) & 0xff;
16804 dev->dev_addr[5] = (lo >> 0) & 0xff;
16806 /* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
16810 /* Next, try NVRAM. */
16811 if (!tg3_flag(tp, NO_NVRAM) &&
16812 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16813 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16814 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16815 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
16819 hi = tr32(MAC_ADDR_0_HIGH);
16820 lo = tr32(MAC_ADDR_0_LOW);
16822 dev->dev_addr[5] = lo & 0xff;
16823 dev->dev_addr[4] = (lo >> 8) & 0xff;
16824 dev->dev_addr[3] = (lo >> 16) & 0xff;
16825 dev->dev_addr[2] = (lo >> 24) & 0xff;
16826 dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}
16831 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16832 #ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	return 0;
}
16841 #define BOUNDARY_SINGLE_CACHELINE 1
16842 #define BOUNDARY_MULTI_CACHELINE 2
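/* goal == 0 in the function below means "leave the chip's DMA boundary
 * controls alone"; the two constants above pick how aggressively bursts
 * are kept from crossing a cacheline.
 */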
static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;
	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
16859 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16860 tg3_asic_rev(tp) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;
16864 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16865 goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif
16874 if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;
16882 /* PCI controllers on most RISC systems tend to disconnect
16883 * when a device tries to burst across a cache-line boundary.
16884 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16886 * Unfortunately, for PCI-E there are only limited
16887 * write-side controls for this, and thus for reads
16888 * we will still get the disconnects. We'll also waste
16889 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
16893 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
			   int size, bool to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16994 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16995 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16996 tw32(RDMAC_STATUS, 0);
16997 tw32(WDMAC_STATUS, 0);
16999 tw32(BUFMGR_MODE, 0);
17000 tw32(FTQ_RESET, 0);
17002 test_desc.addr_hi = ((u64) buf_dma) >> 32;
17003 test_desc.addr_lo = buf_dma & 0xffffffff;
17004 test_desc.nic_mbuf = 0x00002100;
17005 test_desc.len = size;
	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
17013 * ...the DMA engine is connected to the GRC block and a DMA
17014 * reset may affect the GRC block in some unpredictable way...
17015 * The behavior of resets to individual blocks has not been tested.
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
17025 test_desc.cqid_sqid = (16 << 8) | 7;
		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
17030 test_desc.flags = 0x00000005;
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
17036 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17037 sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
17040 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
17066 #define TEST_BUFFER_SIZE 0x2000
17068 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
static int tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}
17086 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17087 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17089 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
	if (tg3_flag(tp, 57765_PLUS))
		goto out;
17094 if (tg3_flag(tp, PCI_EXPRESS)) {
17095 /* DMA read watermark not used on PCIE */
17096 tp->dma_rwctrl |= 0x00180000;
17097 } else if (!tg3_flag(tp, PCIX_MODE)) {
17098 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17099 tg3_asic_rev(tp) == ASIC_REV_5750)
17100 tp->dma_rwctrl |= 0x003f0000;
17102 tp->dma_rwctrl |= 0x003f000f;
17104 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17105 tg3_asic_rev(tp) == ASIC_REV_5704) {
17106 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17107 u32 read_water = 0x7;
17109 /* If the 5704 is behind the EPB bridge, we can
17110 * do the less restrictive ONE_DMA workaround for
17111 * better performance.
17113 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17114 tg3_asic_rev(tp) == ASIC_REV_5704)
17115 tp->dma_rwctrl |= 0x8000;
17116 else if (ccval == 0x6 || ccval == 0x7)
17117 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
			if (tg3_asic_rev(tp) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
17126 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17127 /* 5780 always in PCIX mode */
17128 tp->dma_rwctrl |= 0x00144000;
17129 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17130 /* 5714 always in PCIX mode */
17131 tp->dma_rwctrl |= 0x00148000;
	} else {
		tp->dma_rwctrl |= 0x001b000f;
	}
17136 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17137 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17139 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17140 tg3_asic_rev(tp) == ASIC_REV_5704)
17141 tp->dma_rwctrl &= 0xfffffff0;
17143 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17144 tg3_asic_rev(tp) == ASIC_REV_5701) {
17145 /* Remove this if it causes problems for some boards. */
17146 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17148 /* On 5700/5701 chips, we need to set this bit.
17149 * Otherwise the chip will issue cacheline transactions
17150 * to streamable DMA memory with not all the byte
17151 * enables turned on. This is an error on several
17152 * RISC PCI controllers, in particular sparc64.
17154 * On 5703/5704 chips, this bit has been reassigned
17155 * a different meaning. In particular, it is used
17156 * on those chips to enable a PCI-X workaround.
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}
17161 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17164 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701)
		goto out;
17168 /* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
17171 saved_dma_rwctrl = tp->dma_rwctrl;
17172 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17173 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;
17181 /* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}
17190 /* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

17203 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17204 DMA_RWCTRL_WRITE_BNDRY_16) {
17205 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17206 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
17210 dev_err(&tp->pdev->dev,
17211 "%s: Buffer corrupted on read back! "
17212 "(%d != %d)\n", __func__, p[i], i);
		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
17224 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17225 DMA_RWCTRL_WRITE_BNDRY_16) {
17226 /* DMA test passed without adjusting DMA boundary,
17227 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
17230 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17231 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
static void tg3_init_bufmgr_config(struct tg3 *tp)
{
17249 if (tg3_flag(tp, 57765_PLUS)) {
17250 tp->bufmgr_config.mbuf_read_dma_low_water =
17251 DEFAULT_MB_RDMA_LOW_WATER_5705;
17252 tp->bufmgr_config.mbuf_mac_rx_low_water =
17253 DEFAULT_MB_MACRX_LOW_WATER_57765;
17254 tp->bufmgr_config.mbuf_high_water =
17255 DEFAULT_MB_HIGH_WATER_57765;
17257 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17258 DEFAULT_MB_RDMA_LOW_WATER_5705;
17259 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17260 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17261 tp->bufmgr_config.mbuf_high_water_jumbo =
17262 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17263 } else if (tg3_flag(tp, 5705_PLUS)) {
17264 tp->bufmgr_config.mbuf_read_dma_low_water =
17265 DEFAULT_MB_RDMA_LOW_WATER_5705;
17266 tp->bufmgr_config.mbuf_mac_rx_low_water =
17267 DEFAULT_MB_MACRX_LOW_WATER_5705;
17268 tp->bufmgr_config.mbuf_high_water =
17269 DEFAULT_MB_HIGH_WATER_5705;
17270 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17271 tp->bufmgr_config.mbuf_mac_rx_low_water =
17272 DEFAULT_MB_MACRX_LOW_WATER_5906;
17273 tp->bufmgr_config.mbuf_high_water =
				DEFAULT_MB_HIGH_WATER_5906;
		}
17277 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17278 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17279 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17280 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17281 tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
	} else {
17284 tp->bufmgr_config.mbuf_read_dma_low_water =
17285 DEFAULT_MB_RDMA_LOW_WATER;
17286 tp->bufmgr_config.mbuf_mac_rx_low_water =
17287 DEFAULT_MB_MACRX_LOW_WATER;
17288 tp->bufmgr_config.mbuf_high_water =
17289 DEFAULT_MB_HIGH_WATER;
17291 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17292 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17293 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17294 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17295 tp->bufmgr_config.mbuf_high_water_jumbo =
			DEFAULT_MB_HIGH_WATER_JUMBO;
	}
17299 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
}
static char *tg3_phy_string(struct tg3 *tp)
{
17305 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17306 case TG3_PHY_ID_BCM5400: return "5400";
17307 case TG3_PHY_ID_BCM5401: return "5401";
17308 case TG3_PHY_ID_BCM5411: return "5411";
17309 case TG3_PHY_ID_BCM5701: return "5701";
17310 case TG3_PHY_ID_BCM5703: return "5703";
17311 case TG3_PHY_ID_BCM5704: return "5704";
17312 case TG3_PHY_ID_BCM5705: return "5705";
17313 case TG3_PHY_ID_BCM5750: return "5750";
17314 case TG3_PHY_ID_BCM5752: return "5752";
17315 case TG3_PHY_ID_BCM5714: return "5714";
17316 case TG3_PHY_ID_BCM5780: return "5780";
17317 case TG3_PHY_ID_BCM5755: return "5755";
17318 case TG3_PHY_ID_BCM5787: return "5787";
17319 case TG3_PHY_ID_BCM5784: return "5784";
17320 case TG3_PHY_ID_BCM5756: return "5722/5756";
17321 case TG3_PHY_ID_BCM5906: return "5906";
17322 case TG3_PHY_ID_BCM5761: return "5761";
17323 case TG3_PHY_ID_BCM5718C: return "5718C";
17324 case TG3_PHY_ID_BCM5718S: return "5718S";
17325 case TG3_PHY_ID_BCM57765: return "57765";
17326 case TG3_PHY_ID_BCM5719C: return "5719C";
17327 case TG3_PHY_ID_BCM5720C: return "5720C";
17328 case TG3_PHY_ID_BCM5762: return "5762C";
17329 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17330 case 0: return "serdes";
	default: return "unknown";
	}
}
static char *tg3_bus_string(struct tg3 *tp, char *str)
{
17337 if (tg3_flag(tp, PCI_EXPRESS)) {
17338 strcpy(str, "PCI Express");
17340 } else if (tg3_flag(tp, PCIX_MODE)) {
17341 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17343 strcpy(str, "PCIX:");
17345 if ((clock_ctrl == 7) ||
17346 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17347 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17348 strcat(str, "133MHz");
17349 else if (clock_ctrl == 0)
17350 strcat(str, "33MHz");
17351 else if (clock_ctrl == 2)
17352 strcat(str, "50MHz");
17353 else if (clock_ctrl == 4)
17354 strcat(str, "66MHz");
17355 else if (clock_ctrl == 6)
17356 strcat(str, "100MHz");
17358 strcpy(str, "PCI:");
17359 if (tg3_flag(tp, PCI_HIGH_SPEED))
17360 strcat(str, "66MHz");
17362 strcat(str, "33MHz");
17364 if (tg3_flag(tp, PCI_32BIT))
17365 strcat(str, ":32-bit");
17367 strcat(str, ":64-bit");
static void tg3_init_coal(struct tg3 *tp)
{
17373 struct ethtool_coalesce *ec = &tp->coal;
17375 memset(ec, 0, sizeof(*ec));
17376 ec->cmd = ETHTOOL_GCOALESCE;
17377 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17378 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17379 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17380 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17381 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17382 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17383 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17384 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17385 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17387 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17388 HOSTCC_MODE_CLRTICK_TXBD)) {
17389 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17390 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17391 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
	}
17395 if (tg3_flag(tp, 5705_PLUS)) {
17396 ec->rx_coalesce_usecs_irq = 0;
17397 ec->tx_coalesce_usecs_irq = 0;
		ec->stats_block_coalesce_usecs = 0;
	}
}
static int tg3_init_one(struct pci_dev *pdev,
			const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct tg3 *tp;
	int i, err;
	u32 sndmbx, rcvmbx, intmbx;
	char str[40];
	u64 dma_mask, persist_dma_mask;
	netdev_features_t features = 0;
17413 printk_once(KERN_INFO "%s\n", version);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}
	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable_pdev;
	}
17427 pci_set_master(pdev);
	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
	if (!dev) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
17435 SET_NETDEV_DEV(dev, &pdev->dev);
	tp = netdev_priv(dev);
	tp->pdev = pdev;
	tp->dev = dev;
17440 tp->rx_mode = TG3_DEF_RX_MODE;
17441 tp->tx_mode = TG3_DEF_TX_MODE;
	if (tg3_debug > 0)
		tp->msg_enable = tg3_debug;
	else
		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17449 if (pdev_is_ssb_gige_core(pdev)) {
17450 tg3_flag_set(tp, IS_SSB_CORE);
17451 if (ssb_gige_must_flush_posted_writes(pdev))
17452 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17453 if (ssb_gige_one_dma_at_once(pdev))
17454 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17455 if (ssb_gige_have_roboswitch(pdev)) {
17456 tg3_flag_set(tp, USE_PHYLIB);
			tg3_flag_set(tp, ROBOSWITCH);
		}
17459 if (ssb_gige_is_rgmii(pdev))
			tg3_flag_set(tp, RGMII_MODE);
	}
17463 /* The word/byte swap controls here control register access byte
	 * swapping. DMA data byte swapping is controlled in the GRC_MODE
	 * setting below.
	 */
17467 tp->misc_host_ctrl =
17468 MISC_HOST_CTRL_MASK_PCI_INT |
17469 MISC_HOST_CTRL_WORD_SWAP |
17470 MISC_HOST_CTRL_INDIR_ACCESS |
17471 MISC_HOST_CTRL_PCISTATE_RW;
17473 /* The NONFRM (non-frame) byte/word swap controls take effect
17474 * on descriptor entries, anything which isn't packet data.
17476 * The StrongARM chips on the board (one for tx, one for rx)
	 * are running in big-endian mode.
	 */
17479 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17480 GRC_MODE_WSWAP_NONFRM_DATA);
17481 #ifdef __BIG_ENDIAN
	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
#endif
17484 spin_lock_init(&tp->lock);
17485 spin_lock_init(&tp->indirect_lock);
17486 INIT_WORK(&tp->reset_task, tg3_reset_task);
	tp->regs = pci_ioremap_bar(pdev, BAR_0);
	if (!tp->regs) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
17495 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17496 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17497 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17498 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17499 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17500 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17501 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17502 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17503 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17504 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17505 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17506 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17507 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17508 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17509 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17510 tg3_flag_set(tp, ENABLE_APE);
17511 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17512 if (!tp->aperegs) {
17513 dev_err(&pdev->dev,
17514 "Cannot map APE registers, aborting\n");
			err = -ENOMEM;
			goto err_out_iounmap;
		}
	}
17520 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17521 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17523 dev->ethtool_ops = &tg3_ethtool_ops;
17524 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17525 dev->netdev_ops = &tg3_netdev_ops;
17526 dev->irq = pdev->irq;
	err = tg3_get_invariants(tp, ent);
	if (err) {
		dev_err(&pdev->dev,
			"Problem fetching invariants of chip, aborting\n");
		goto err_out_apeunmap;
	}
17535 /* The EPB bridge inside 5714, 5715, and 5780 and any
17536 * device behind the EPB cannot support DMA addresses > 40-bit.
17537 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17538 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
	 * do DMA address check in tg3_start_xmit().
	 */
17541 if (tg3_flag(tp, IS_5788))
17542 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17543 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17544 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17545 #ifdef CONFIG_HIGHMEM
		dma_mask = DMA_BIT_MASK(64);
#endif
	} else
17549 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17551 /* Configure DMA attributes. */
17552 if (dma_mask > DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, dma_mask);
		if (err == 0) {
			features |= NETIF_F_HIGHDMA;
			err = pci_set_consistent_dma_mask(pdev,
							  persist_dma_mask);
			if (err < 0) {
				dev_err(&pdev->dev, "Unable to obtain 64 bit "
					"DMA for consistent allocations\n");
				goto err_out_apeunmap;
			}
		}
	}
17565 if (err || dma_mask == DMA_BIT_MASK(32)) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_out_apeunmap;
		}
	}
17574 tg3_init_bufmgr_config(tp);
17576 features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
17578 /* 5700 B0 chips do not support checksumming correctly due
	 * to hardware bugs.
	 */
17581 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17582 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17584 if (tg3_flag(tp, 5755_PLUS))
			features |= NETIF_F_IPV6_CSUM;
	}
17588 /* TSO is on by default on chips that support hardware TSO.
17589 * Firmware TSO on older chips gives lower performance, so it
	 * is off by default, but can be enabled using ethtool.
	 */
17592 if ((tg3_flag(tp, HW_TSO_1) ||
17593 tg3_flag(tp, HW_TSO_2) ||
17594 tg3_flag(tp, HW_TSO_3)) &&
17595 (features & NETIF_F_IP_CSUM))
17596 features |= NETIF_F_TSO;
17597 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17598 if (features & NETIF_F_IPV6_CSUM)
17599 features |= NETIF_F_TSO6;
17600 if (tg3_flag(tp, HW_TSO_3) ||
17601 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17602 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17603 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17604 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17605 tg3_asic_rev(tp) == ASIC_REV_57780)
			features |= NETIF_F_TSO_ECN;
	}
17609 dev->features |= features;
17610 dev->vlan_features |= features;
	/*
	 * Add loopback capability only for a subset of devices that support
	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
	 * loopback for the remaining devices.
	 */
17617 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17618 !tg3_flag(tp, CPMU_PRESENT))
17619 /* Add the loopback capability */
17620 features |= NETIF_F_LOOPBACK;
17622 dev->hw_features |= features;
17624 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17625 !tg3_flag(tp, TSO_CAPABLE) &&
17626 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17627 tg3_flag_set(tp, MAX_RXPEND_64);
		tp->rx_pending = 63;
	}
	err = tg3_get_device_address(tp);
	if (err) {
		dev_err(&pdev->dev,
			"Could not obtain valid ethernet address, aborting\n");
		goto err_out_apeunmap;
	}
	/*
	 * Reset chip in case UNDI or EFI driver did not shut down DMA.
	 * The DMA self test will enable WDMAC and we'll see (spurious)
	 * pending DMA on the PCI bus at that point.
	 */
17643 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17644 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17645 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	}
	err = tg3_test_dma(tp);
	if (err) {
		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
		goto err_out_apeunmap;
	}
17655 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17656 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17657 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17658 for (i = 0; i < tp->irq_max; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tnapi->tp = tp;
		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;

		tnapi->int_mbox = intmbx;
		if (i <= 4)
			intmbx += 0x8;
		else
			intmbx += 0x4;

		tnapi->consmbox = rcvmbx;
		tnapi->prodmbox = sndmbx;

		if (i)
			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
		else
			tnapi->coal_now = HOSTCC_MODE_NOW;
		if (!tg3_flag(tp, SUPPORT_MSIX))
			break;
		/*
		 * If we support MSIX, we'll be using RSS. If we're using
17683 * RSS, the first vector only handles link interrupts and the
17684 * remaining vectors handle rx and tx interrupts. Reuse the
17685 * mailbox values for the next iteration. The values we setup
		 * above are still useful for the single vectored mode.
		 */
		rcvmbx += 0x8;

		if (sndmbx & 0x4)
			sndmbx -= 0x4;
		else
			sndmbx += 0xc;
	}

	tg3_init_coal(tp);
17701 pci_set_drvdata(pdev, dev);
17703 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17704 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17705 tg3_asic_rev(tp) == ASIC_REV_5762)
17706 tg3_flag_set(tp, PTP_CAPABLE);
17708 tg3_timer_init(tp);
17710 tg3_carrier_off(tp);
	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
		goto err_out_apeunmap;
	}
17718 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17719 tp->board_part_number,
17720 tg3_chip_rev_id(tp),
		    tg3_bus_string(tp, str),
		    dev->dev_addr);
17724 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17725 struct phy_device *phydev;
17726 phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17728 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17729 phydev->drv->name, dev_name(&phydev->dev));
17733 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17734 ethtype = "10/100Base-TX";
17735 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17736 ethtype = "1000Base-SX";
17738 ethtype = "10/100/1000Base-T";
17740 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17741 "(WireSpeed[%d], EEE[%d])\n",
17742 tg3_phy_string(tp), ethtype,
17743 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17744 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17747 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17748 (dev->features & NETIF_F_RXCSUM) != 0,
17749 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17750 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17751 tg3_flag(tp, ENABLE_ASF) != 0,
17752 tg3_flag(tp, TSO_CAPABLE) != 0);
17753 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17755 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17756 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
	pci_save_state(pdev);

	return 0;

err_out_apeunmap:
	if (tp->aperegs) {
17764 iounmap(tp->aperegs);
		tp->aperegs = NULL;
	}

err_out_iounmap:
	if (tp->regs) {
		iounmap(tp->regs);
		tp->regs = NULL;
	}

err_out_free_dev:
	free_netdev(dev);

err_out_free_res:
17778 pci_release_regions(pdev);
17780 err_out_disable_pdev:
17781 if (pci_is_enabled(pdev))
		pci_disable_device(pdev);
	return err;
}
static void tg3_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct tg3 *tp = netdev_priv(dev);
17793 release_firmware(tp->fw);
17795 tg3_reset_task_cancel(tp);
		if (tg3_flag(tp, USE_PHYLIB)) {
			tg3_phy_fini(tp);
			tg3_mdio_fini(tp);
		}
17802 unregister_netdev(dev);
		if (tp->aperegs) {
			iounmap(tp->aperegs);
			tp->aperegs = NULL;
		}
		if (tp->regs) {
			iounmap(tp->regs);
			tp->regs = NULL;
		}
		free_netdev(dev);
17812 pci_release_regions(pdev);
		pci_disable_device(pdev);
	}
}
17817 #ifdef CONFIG_PM_SLEEP
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;
	tg3_reset_task_cancel(tp);
	tg3_phy_stop(tp);
17832 tg3_netif_stop(tp);
17834 tg3_timer_stop(tp);
17836 tg3_full_lock(tp, 1);
17837 tg3_disable_ints(tp);
17838 tg3_full_unlock(tp);
17840 netif_device_detach(dev);
17842 tg3_full_lock(tp, 0);
17843 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17844 tg3_flag_clear(tp, INIT_COMPLETE);
17845 tg3_full_unlock(tp);
	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

17851 tg3_full_lock(tp, 0);
17853 tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, true);
		if (err2)
			goto out;
17858 tg3_timer_start(tp);
17860 netif_device_attach(dev);
17861 tg3_netif_start(tp);
out:
		tg3_full_unlock(tp);

		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (!netif_running(dev))
		return 0;
17887 netif_device_attach(dev);
17889 tg3_full_lock(tp, 0);
17891 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
17893 tg3_flag_set(tp, INIT_COMPLETE);
17894 err = tg3_restart_hw(tp,
17895 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17899 tg3_timer_start(tp);
17901 tg3_netif_start(tp);
out:
	tg3_full_unlock(tp);

	if (!err)
		tg3_phy_start(tp);

	return err;
}
17913 #endif /* CONFIG_PM_SLEEP */
17915 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
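/* SIMPLE_DEV_PM_OPS expands to a struct dev_pm_ops named tg3_pm_ops with
 * tg3_suspend/tg3_resume wired to all of the system-sleep callbacks
 * (suspend, freeze, poweroff and their resume counterparts).
 */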
static void tg3_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);

	rtnl_lock();
17923 netif_device_detach(dev);
	if (netif_running(dev))
		dev_close(dev);
17928 if (system_state == SYSTEM_POWER_OFF)
		tg3_power_down(tp);

	rtnl_unlock();
}
/**
 * tg3_io_error_detected - called when PCI error is detected
17936 * @pdev: Pointer to PCI device
17937 * @state: The current pci connection state
17939 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
17942 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
17945 struct net_device *netdev = pci_get_drvdata(pdev);
17946 struct tg3 *tp = netdev_priv(netdev);
17947 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17949 netdev_info(netdev, "PCI I/O error detected\n");
17953 /* We probably don't have netdev yet */
	if (!netdev || !netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);
17959 tg3_netif_stop(tp);
17961 tg3_timer_stop(tp);
17963 /* Want to make sure that the reset task doesn't run */
17964 tg3_reset_task_cancel(tp);
17966 netif_device_detach(netdev);
17968 /* Clean up software state, even if MMIO is blocked */
17969 tg3_full_lock(tp, 0);
17970 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	if (state == pci_channel_io_perm_failure) {
		if (netdev) {
			tg3_napi_enable(tp);
			dev_close(netdev);
		}
		err = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_disable_device(pdev);
	}

	rtnl_unlock();

	return err;
}
/**
 * tg3_io_slot_reset - called after the pci bus has been reset.
17991 * @pdev: Pointer to PCI device
17993 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
17995 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();
18007 if (pci_enable_device(pdev)) {
18008 dev_err(&pdev->dev,
18009 "Cannot re-enable PCI device after reset.\n");
18013 pci_set_master(pdev);
18014 pci_restore_state(pdev);
18015 pci_save_state(pdev);
18017 if (!netdev || !netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}
	err = tg3_power_up(tp);
	if (err)
		goto done;

	rc = PCI_ERS_RESULT_RECOVERED;

done:
18029 if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
		tg3_napi_enable(tp);
		dev_close(netdev);
	}
	rtnl_unlock();

	return rc;
}
/**
 * tg3_io_resume - called when traffic can start flowing again.
18040 * @pdev: Pointer to PCI device
18042 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	if (!netif_running(netdev))
		goto done;
18056 tg3_full_lock(tp, 0);
18057 tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18058 tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, true);
	if (err) {
		tg3_full_unlock(tp);
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}
18066 netif_device_attach(netdev);
18068 tg3_timer_start(tp);
18070 tg3_netif_start(tp);
	tg3_full_unlock(tp);

	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
18080 static const struct pci_error_handlers tg3_err_handler = {
18081 .error_detected = tg3_io_error_detected,
18082 .slot_reset = tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
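/* Recovery flow (per the PCI error-recovery model): the core calls
 * .error_detected first, then .slot_reset after the link/bus reset, and
 * finally .resume once traffic may restart.
 */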
18086 static struct pci_driver tg3_driver = {
18087 .name = DRV_MODULE_NAME,
18088 .id_table = tg3_pci_tbl,
18089 .probe = tg3_init_one,
18090 .remove = tg3_remove_one,
18091 .err_handler = &tg3_err_handler,
18092 .driver.pm = &tg3_pm_ops,
	.shutdown	= tg3_shutdown,
};
18096 module_pci_driver(tg3_driver);
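/* module_pci_driver() expands to the boilerplate module_init()/module_exit()
 * pair that registers and unregisters tg3_driver with the PCI core.
 */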