/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
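
/* Example (illustrative): callers go through the wrappers above rather
 * than the underscore helpers; the token-pasting macro turns the short
 * name into the full enum value, so
 *
 *      tg3_flag(tp, ENABLE_APE)
 *
 * expands to _tg3_flag(TG3_FLAG_ENABLE_APE, (tp)->tg3_flags).
 */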

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     127
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "November 14, 2012"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
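
/* Worked example of the note above (illustrative): with TG3_TX_RING_SIZE
 * fixed at 512, NEXT_TX(511) == (512 & 511) == 0, so the '& (size - 1)'
 * form wraps the producer index exactly like '% TG3_TX_RING_SIZE' would,
 * but compiles down to a single AND because the ring size is a
 * compile-time power of two.
 */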

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
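
/* Minimal sketch (hypothetical helper, not part of this driver) of how
 * the threshold above is consulted on receive: frames at or below the
 * cutoff are copied into a fresh skb so the mapped DMA buffer can be
 * recycled immediately, while larger frames are handed up in place.
 */
#if 0   /* illustration only */
static bool tg3_rx_should_copy(struct tg3 *tp, u32 len)
{
        return len <= TG3_RX_COPY_THRESH(tp);
}
#endif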

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)

#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}
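
/* Note: the pair of indirect helpers above implements the chip's register
 * window: TG3PCI_REG_BASE_ADDR selects the target register and
 * TG3PCI_REG_DATA moves the data, both via PCI config space, so registers
 * stay reachable on chips where direct MMIO is unsafe (see the
 * PCIX_TARGET_HWBUG / ICH_WORKAROUND checks in _tw32_flush() below).
 * indirect_lock keeps the base/data register pair atomic against other
 * users of the window.
 */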

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
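
/* Usage sketch (illustrative, constants as defined earlier in this file):
 * a write that must settle before the caller proceeds, e.g. GPIO power
 * switching through GRC_LOCAL_CTRL, goes through tw32_wait_f() so the
 * delay is honored on both the posted and non-posted paths:
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 */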

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;
        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
                /* fallthru */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}
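
/* Usage sketch: callers bracket accesses to a shared resource with the
 * request/release pair and must honor an -EBUSY result; the PHY helpers
 * later in this file do exactly this:
 *
 *      tg3_ape_lock(tp, tp->phy_ape_lock);
 *      ... access the MI registers ...
 *      tg3_ape_unlock(tp, tp->phy_ape_lock);
 */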

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
                /* fallthru */
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                        TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}
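
/* Worked example (illustrative): reading MII_BMSR (register 1) from a
 * PHY at address 1 encodes the frame as
 *
 *      frame_val = (1 << MI_COM_PHY_ADDR_SHIFT) |
 *                  (1 << MI_COM_REG_ADDR_SHIFT) |
 *                  MI_COM_CMD_READ | MI_COM_START;
 *
 * and the result is recovered from MAC_MI_COM's data field once
 * MI_COM_BUSY clears, exactly as tg3_readphy() does above.
 */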

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

1497         /* The bus registration will look for all the PHYs on the mdio bus.
1498          * Unfortunately, it does not ensure the PHY is powered up before
1499          * accessing the PHY ID registers.  A PHY (BMCR) reset is the
1500          * quickest way to bring the device back to an operational state.
1501          */
1502         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1503                 tg3_bmcr_reset(tp);
1504
1505         i = mdiobus_register(tp->mdio_bus);
1506         if (i) {
1507                 dev_warn(&tp->pdev->dev, "mdiobus_register failed (0x%x)\n", i);
1508                 mdiobus_free(tp->mdio_bus);
1509                 return i;
1510         }
1511
1512         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1513
1514         if (!phydev || !phydev->drv) {
1515                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1516                 mdiobus_unregister(tp->mdio_bus);
1517                 mdiobus_free(tp->mdio_bus);
1518                 return -ENODEV;
1519         }
1520
1521         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1522         case PHY_ID_BCM57780:
1523                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1524                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1525                 break;
1526         case PHY_ID_BCM50610:
1527         case PHY_ID_BCM50610M:
1528                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1529                                      PHY_BRCM_RX_REFCLK_UNUSED |
1530                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1531                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1532                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1533                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1534                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1535                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1536                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1537                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1538                 /* fallthru */
1539         case PHY_ID_RTL8211C:
1540                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1541                 break;
1542         case PHY_ID_RTL8201E:
1543         case PHY_ID_BCMAC131:
1544                 phydev->interface = PHY_INTERFACE_MODE_MII;
1545                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1546                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1547                 break;
1548         }
1549
1550         tg3_flag_set(tp, MDIOBUS_INITED);
1551
1552         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1553                 tg3_mdio_config_5785(tp);
1554
1555         return 0;
1556 }
1557
1558 static void tg3_mdio_fini(struct tg3 *tp)
1559 {
1560         if (tg3_flag(tp, MDIOBUS_INITED)) {
1561                 tg3_flag_clear(tp, MDIOBUS_INITED);
1562                 mdiobus_unregister(tp->mdio_bus);
1563                 mdiobus_free(tp->mdio_bus);
1564         }
1565 }
1566
1567 /* tp->lock is held. */
1568 static inline void tg3_generate_fw_event(struct tg3 *tp)
1569 {
1570         u32 val;
1571
1572         val = tr32(GRC_RX_CPU_EVENT);
1573         val |= GRC_RX_CPU_DRIVER_EVENT;
1574         tw32_f(GRC_RX_CPU_EVENT, val);
1575
1576         tp->last_event_jiffies = jiffies;
1577 }
1578
1579 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1580
1581 /* tp->lock is held. */
1582 static void tg3_wait_for_event_ack(struct tg3 *tp)
1583 {
1584         int i;
1585         unsigned int delay_cnt;
1586         long time_remain;
1587
1588         /* If enough time has passed, no wait is necessary. */
1589         time_remain = (long)(tp->last_event_jiffies + 1 +
1590                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1591                       (long)jiffies;
1592         if (time_remain < 0)
1593                 return;
1594
1595         /* Check if we can shorten the wait time. */
1596         delay_cnt = jiffies_to_usecs(time_remain);
1597         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1598                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1599         delay_cnt = (delay_cnt >> 3) + 1;
1600
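        /* Poll in 8 usec steps; the >> 3 above divides the remaining
         * budget by 8 and the + 1 rounds up, so a full 2500 usec
         * timeout yields 313 iterations of udelay(8).
         */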
1601         for (i = 0; i < delay_cnt; i++) {
1602                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1603                         break;
1604                 udelay(8);
1605         }
1606 }
1607
1608 /* tp->lock is held. */
1609 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1610 {
1611         u32 reg, val;
1612
1613         val = 0;
1614         if (!tg3_readphy(tp, MII_BMCR, &reg))
1615                 val = reg << 16;
1616         if (!tg3_readphy(tp, MII_BMSR, &reg))
1617                 val |= (reg & 0xffff);
1618         *data++ = val;
1619
1620         val = 0;
1621         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1622                 val = reg << 16;
1623         if (!tg3_readphy(tp, MII_LPA, &reg))
1624                 val |= (reg & 0xffff);
1625         *data++ = val;
1626
1627         val = 0;
1628         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1629                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1630                         val = reg << 16;
1631                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1632                         val |= (reg & 0xffff);
1633         }
1634         *data++ = val;
1635
1636         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1637                 val = reg << 16;
1638         else
1639                 val = 0;
1640         *data++ = val;
1641 }
1642
1643 /* tp->lock is held. */
1644 static void tg3_ump_link_report(struct tg3 *tp)
1645 {
1646         u32 data[4];
1647
1648         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1649                 return;
1650
1651         tg3_phy_gather_ump_data(tp, data);
1652
1653         tg3_wait_for_event_ack(tp);
1654
1655         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1656         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1657         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1658         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1659         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1660         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1661
1662         tg3_generate_fw_event(tp);
1663 }
1664
1665 /* tp->lock is held. */
1666 static void tg3_stop_fw(struct tg3 *tp)
1667 {
1668         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1669                 /* Wait for RX cpu to ACK the previous event. */
1670                 tg3_wait_for_event_ack(tp);
1671
1672                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1673
1674                 tg3_generate_fw_event(tp);
1675
1676                 /* Wait for RX cpu to ACK this event. */
1677                 tg3_wait_for_event_ack(tp);
1678         }
1679 }
1680
1681 /* tp->lock is held. */
1682 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1683 {
1684         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1685                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1686
1687         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1688                 switch (kind) {
1689                 case RESET_KIND_INIT:
1690                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1691                                       DRV_STATE_START);
1692                         break;
1693
1694                 case RESET_KIND_SHUTDOWN:
1695                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1696                                       DRV_STATE_UNLOAD);
1697                         break;
1698
1699                 case RESET_KIND_SUSPEND:
1700                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1701                                       DRV_STATE_SUSPEND);
1702                         break;
1703
1704                 default:
1705                         break;
1706                 }
1707         }
1708
1709         if (kind == RESET_KIND_INIT ||
1710             kind == RESET_KIND_SUSPEND)
1711                 tg3_ape_driver_state_change(tp, kind);
1712 }
1713
1714 /* tp->lock is held. */
1715 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1716 {
1717         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1718                 switch (kind) {
1719                 case RESET_KIND_INIT:
1720                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1721                                       DRV_STATE_START_DONE);
1722                         break;
1723
1724                 case RESET_KIND_SHUTDOWN:
1725                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1726                                       DRV_STATE_UNLOAD_DONE);
1727                         break;
1728
1729                 default:
1730                         break;
1731                 }
1732         }
1733
1734         if (kind == RESET_KIND_SHUTDOWN)
1735                 tg3_ape_driver_state_change(tp, kind);
1736 }
1737
1738 /* tp->lock is held. */
1739 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1740 {
1741         if (tg3_flag(tp, ENABLE_ASF)) {
1742                 switch (kind) {
1743                 case RESET_KIND_INIT:
1744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1745                                       DRV_STATE_START);
1746                         break;
1747
1748                 case RESET_KIND_SHUTDOWN:
1749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1750                                       DRV_STATE_UNLOAD);
1751                         break;
1752
1753                 case RESET_KIND_SUSPEND:
1754                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1755                                       DRV_STATE_SUSPEND);
1756                         break;
1757
1758                 default:
1759                         break;
1760                 }
1761         }
1762 }
1763
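/* Wait for bootcode/firmware to finish after a reset.  The 5906 polls
 * VCPU_STATUS for up to 20 ms (200 x 100 usec); all other chips poll
 * the firmware mailbox for up to ~1 second (100000 x 10 usec) for the
 * one's complement of the magic value the driver wrote before reset.
 */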
1764 static int tg3_poll_fw(struct tg3 *tp)
1765 {
1766         int i;
1767         u32 val;
1768
1769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1770                 /* Wait up to 20ms for init done. */
1771                 for (i = 0; i < 200; i++) {
1772                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1773                                 return 0;
1774                         udelay(100);
1775                 }
1776                 return -ENODEV;
1777         }
1778
1779         /* Wait for firmware initialization to complete. */
1780         for (i = 0; i < 100000; i++) {
1781                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1782                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1783                         break;
1784                 udelay(10);
1785         }
1786
1787         /* Chip might not be fitted with firmware.  Some Sun onboard
1788          * parts are configured like that.  So don't signal the timeout
1789          * of the above loop as an error, but do report the lack of
1790          * running firmware once.
1791          */
1792         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1793                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1794
1795                 netdev_info(tp->dev, "No firmware running\n");
1796         }
1797
1798         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1799                 /* The 57765 A0 needs a little more
1800                  * time to do some important work.
1801                  */
1802                 mdelay(10);
1803         }
1804
1805         return 0;
1806 }
1807
1808 static void tg3_link_report(struct tg3 *tp)
1809 {
1810         if (!netif_carrier_ok(tp->dev)) {
1811                 netif_info(tp, link, tp->dev, "Link is down\n");
1812                 tg3_ump_link_report(tp);
1813         } else if (netif_msg_link(tp)) {
1814                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1815                             (tp->link_config.active_speed == SPEED_1000 ?
1816                              1000 :
1817                              (tp->link_config.active_speed == SPEED_100 ?
1818                               100 : 10)),
1819                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1820                              "full" : "half"));
1821
1822                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1823                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1824                             "on" : "off",
1825                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1826                             "on" : "off");
1827
1828                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1829                         netdev_info(tp->dev, "EEE is %s\n",
1830                                     tp->setlpicnt ? "enabled" : "disabled");
1831
1832                 tg3_ump_link_report(tp);
1833         }
1834 }
1835
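/* Map the driver's FLOW_CTRL_TX/RX request onto the 1000Base-X pause
 * advertisement bits (IEEE 802.3 annex 28B):
 *
 *   TX and RX  ->  PAUSE (symmetric)
 *   TX only    ->  PAUSE_ASYM
 *   RX only    ->  PAUSE | PAUSE_ASYM
 */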
1836 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1837 {
1838         u16 miireg;
1839
1840         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1841                 miireg = ADVERTISE_1000XPAUSE;
1842         else if (flow_ctrl & FLOW_CTRL_TX)
1843                 miireg = ADVERTISE_1000XPSE_ASYM;
1844         else if (flow_ctrl & FLOW_CTRL_RX)
1845                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1846         else
1847                 miireg = 0;
1848
1849         return miireg;
1850 }
1851
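/* Resolve the negotiated pause capability from the local and remote
 * 1000Base-X advertisements.  Both sides advertising symmetric PAUSE
 * enables flow control in both directions; otherwise an asymmetric
 * match enables RX-only pause here when we advertised PAUSE, or
 * TX-only pause when the link partner did.
 */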
1852 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1853 {
1854         u8 cap = 0;
1855
1856         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1857                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1858         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1859                 if (lcladv & ADVERTISE_1000XPAUSE)
1860                         cap = FLOW_CTRL_RX;
1861                 if (rmtadv & ADVERTISE_1000XPAUSE)
1862                         cap = FLOW_CTRL_TX;
1863         }
1864
1865         return cap;
1866 }
1867
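/* Program the MAC's RX/TX flow-control enables.  With autoneg and
 * PAUSE_AUTONEG in effect the setting comes from the resolved
 * advertisements; otherwise the statically configured flowctrl is
 * applied.  The mode registers are rewritten only when their value
 * actually changes.
 */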
1868 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1869 {
1870         u8 autoneg;
1871         u8 flowctrl = 0;
1872         u32 old_rx_mode = tp->rx_mode;
1873         u32 old_tx_mode = tp->tx_mode;
1874
1875         if (tg3_flag(tp, USE_PHYLIB))
1876                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1877         else
1878                 autoneg = tp->link_config.autoneg;
1879
1880         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1881                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1882                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1883                 else
1884                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1885         } else
1886                 flowctrl = tp->link_config.flowctrl;
1887
1888         tp->link_config.active_flowctrl = flowctrl;
1889
1890         if (flowctrl & FLOW_CTRL_RX)
1891                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1892         else
1893                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1894
1895         if (old_rx_mode != tp->rx_mode)
1896                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1897
1898         if (flowctrl & FLOW_CTRL_TX)
1899                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1900         else
1901                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1902
1903         if (old_tx_mode != tp->tx_mode)
1904                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1905 }
1906
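/* phylib link-change callback.  Translates the PHY's current
 * speed/duplex/pause state into MAC_MODE, MAC_MI_STAT and
 * MAC_TX_LENGTHS register settings, and reports the link state when
 * anything the driver tracks has changed.
 */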
1907 static void tg3_adjust_link(struct net_device *dev)
1908 {
1909         u8 oldflowctrl, linkmesg = 0;
1910         u32 mac_mode, lcl_adv, rmt_adv;
1911         struct tg3 *tp = netdev_priv(dev);
1912         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1913
1914         spin_lock_bh(&tp->lock);
1915
1916         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1917                                     MAC_MODE_HALF_DUPLEX);
1918
1919         oldflowctrl = tp->link_config.active_flowctrl;
1920
1921         if (phydev->link) {
1922                 lcl_adv = 0;
1923                 rmt_adv = 0;
1924
1925                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1926                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1927                 else if (phydev->speed == SPEED_1000 ||
1928                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1929                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1930                 else
1931                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1932
1933                 if (phydev->duplex == DUPLEX_HALF)
1934                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1935                 else {
1936                         lcl_adv = mii_advertise_flowctrl(
1937                                   tp->link_config.flowctrl);
1938
1939                         if (phydev->pause)
1940                                 rmt_adv = LPA_PAUSE_CAP;
1941                         if (phydev->asym_pause)
1942                                 rmt_adv |= LPA_PAUSE_ASYM;
1943                 }
1944
1945                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1946         } else
1947                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1948
1949         if (mac_mode != tp->mac_mode) {
1950                 tp->mac_mode = mac_mode;
1951                 tw32_f(MAC_MODE, tp->mac_mode);
1952                 udelay(40);
1953         }
1954
1955         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1956                 if (phydev->speed == SPEED_10)
1957                         tw32(MAC_MI_STAT,
1958                              MAC_MI_STAT_10MBPS_MODE |
1959                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1960                 else
1961                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1962         }
1963
1964         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1965                 tw32(MAC_TX_LENGTHS,
1966                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1967                       (6 << TX_LENGTHS_IPG_SHIFT) |
1968                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1969         else
1970                 tw32(MAC_TX_LENGTHS,
1971                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1972                       (6 << TX_LENGTHS_IPG_SHIFT) |
1973                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1974
1975         if (phydev->link != tp->old_link ||
1976             phydev->speed != tp->link_config.active_speed ||
1977             phydev->duplex != tp->link_config.active_duplex ||
1978             oldflowctrl != tp->link_config.active_flowctrl)
1979                 linkmesg = 1;
1980
1981         tp->old_link = phydev->link;
1982         tp->link_config.active_speed = phydev->speed;
1983         tp->link_config.active_duplex = phydev->duplex;
1984
1985         spin_unlock_bh(&tp->lock);
1986
1987         if (linkmesg)
1988                 tg3_link_report(tp);
1989 }
1990
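/* Connect to the PHY through phylib (done once per device) and mask
 * its supported features down to what the MAC can do, including the
 * 10/100-only case.
 */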
1991 static int tg3_phy_init(struct tg3 *tp)
1992 {
1993         struct phy_device *phydev;
1994
1995         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1996                 return 0;
1997
1998         /* Bring the PHY back to a known state. */
1999         tg3_bmcr_reset(tp);
2000
2001         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2002
2003         /* Attach the MAC to the PHY. */
2004         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
2005                              phydev->dev_flags, phydev->interface);
2006         if (IS_ERR(phydev)) {
2007                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2008                 return PTR_ERR(phydev);
2009         }
2010
2011         /* Mask with MAC supported features. */
2012         switch (phydev->interface) {
2013         case PHY_INTERFACE_MODE_GMII:
2014         case PHY_INTERFACE_MODE_RGMII:
2015                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2016                         phydev->supported &= (PHY_GBIT_FEATURES |
2017                                               SUPPORTED_Pause |
2018                                               SUPPORTED_Asym_Pause);
2019                         break;
2020                 }
2021                 /* fallthru */
2022         case PHY_INTERFACE_MODE_MII:
2023                 phydev->supported &= (PHY_BASIC_FEATURES |
2024                                       SUPPORTED_Pause |
2025                                       SUPPORTED_Asym_Pause);
2026                 break;
2027         default:
2028                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2029                 return -EINVAL;
2030         }
2031
2032         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2033
2034         phydev->advertising = phydev->supported;
2035
2036         return 0;
2037 }
2038
2039 static void tg3_phy_start(struct tg3 *tp)
2040 {
2041         struct phy_device *phydev;
2042
2043         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2044                 return;
2045
2046         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2047
2048         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2049                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2050                 phydev->speed = tp->link_config.speed;
2051                 phydev->duplex = tp->link_config.duplex;
2052                 phydev->autoneg = tp->link_config.autoneg;
2053                 phydev->advertising = tp->link_config.advertising;
2054         }
2055
2056         phy_start(phydev);
2057
2058         phy_start_aneg(phydev);
2059 }
2060
2061 static void tg3_phy_stop(struct tg3 *tp)
2062 {
2063         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2064                 return;
2065
2066         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2067 }
2068
2069 static void tg3_phy_fini(struct tg3 *tp)
2070 {
2071         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2072                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2073                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2074         }
2075 }
2076
2077 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2078 {
2079         int err;
2080         u32 val;
2081
2082         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2083                 return 0;
2084
2085         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2086                 /* Cannot do read-modify-write on 5401 */
2087                 err = tg3_phy_auxctl_write(tp,
2088                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2089                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2090                                            0x4c20);
2091                 goto done;
2092         }
2093
2094         err = tg3_phy_auxctl_read(tp,
2095                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2096         if (err)
2097                 return err;
2098
2099         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2100         err = tg3_phy_auxctl_write(tp,
2101                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2102
2103 done:
2104         return err;
2105 }
2106
2107 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2108 {
2109         u32 phytest;
2110
2111         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2112                 u32 phy;
2113
2114                 tg3_writephy(tp, MII_TG3_FET_TEST,
2115                              phytest | MII_TG3_FET_SHADOW_EN);
2116                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2117                         if (enable)
2118                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2119                         else
2120                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2121                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2122                 }
2123                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2124         }
2125 }
2126
2127 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2128 {
2129         u32 reg;
2130
2131         if (!tg3_flag(tp, 5705_PLUS) ||
2132             (tg3_flag(tp, 5717_PLUS) &&
2133              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2134                 return;
2135
2136         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2137                 tg3_phy_fet_toggle_apd(tp, enable);
2138                 return;
2139         }
2140
2141         reg = MII_TG3_MISC_SHDW_WREN |
2142               MII_TG3_MISC_SHDW_SCR5_SEL |
2143               MII_TG3_MISC_SHDW_SCR5_LPED |
2144               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2145               MII_TG3_MISC_SHDW_SCR5_SDTL |
2146               MII_TG3_MISC_SHDW_SCR5_C125OE;
2147         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
2148                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2149
2150         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2151
2153         reg = MII_TG3_MISC_SHDW_WREN |
2154               MII_TG3_MISC_SHDW_APD_SEL |
2155               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2156         if (enable)
2157                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2158
2159         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2160 }
2161
2162 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2163 {
2164         u32 phy;
2165
2166         if (!tg3_flag(tp, 5705_PLUS) ||
2167             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2168                 return;
2169
2170         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2171                 u32 ephy;
2172
2173                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2174                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2175
2176                         tg3_writephy(tp, MII_TG3_FET_TEST,
2177                                      ephy | MII_TG3_FET_SHADOW_EN);
2178                         if (!tg3_readphy(tp, reg, &phy)) {
2179                                 if (enable)
2180                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2181                                 else
2182                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2183                                 tg3_writephy(tp, reg, phy);
2184                         }
2185                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2186                 }
2187         } else {
2188                 int ret;
2189
2190                 ret = tg3_phy_auxctl_read(tp,
2191                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2192                 if (!ret) {
2193                         if (enable)
2194                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2195                         else
2196                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2197                         tg3_phy_auxctl_write(tp,
2198                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2199                 }
2200         }
2201 }
2202
2203 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2204 {
2205         int ret;
2206         u32 val;
2207
2208         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2209                 return;
2210
2211         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2212         if (!ret)
2213                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2214                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2215 }
2216
2217 static void tg3_phy_apply_otp(struct tg3 *tp)
2218 {
2219         u32 otp, phy;
2220
2221         if (!tp->phy_otp)
2222                 return;
2223
2224         otp = tp->phy_otp;
2225
2226         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2227                 return;
2228
2229         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2230         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2231         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2232
2233         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2234               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2235         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2236
2237         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2238         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2239         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2240
2241         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2242         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2243
2244         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2245         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2246
2247         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2248               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2249         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2250
2251         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2252 }
2253
2254 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2255 {
2256         u32 val;
2257
2258         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2259                 return;
2260
2261         tp->setlpicnt = 0;
2262
2263         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2264             current_link_up == 1 &&
2265             tp->link_config.active_duplex == DUPLEX_FULL &&
2266             (tp->link_config.active_speed == SPEED_100 ||
2267              tp->link_config.active_speed == SPEED_1000)) {
2268                 u32 eeectl;
2269
2270                 if (tp->link_config.active_speed == SPEED_1000)
2271                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2272                 else
2273                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2274
2275                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2276
2277                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2278                                   TG3_CL45_D7_EEERES_STAT, &val);
2279
2280                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2281                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2282                         tp->setlpicnt = 2;
2283         }
2284
2285         if (!tp->setlpicnt) {
2286                 if (current_link_up == 1 &&
2287                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2288                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2289                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2290                 }
2291
2292                 val = tr32(TG3_CPMU_EEE_MODE);
2293                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2294         }
2295 }
2296
2297 static void tg3_phy_eee_enable(struct tg3 *tp)
2298 {
2299         u32 val;
2300
2301         if (tp->link_config.active_speed == SPEED_1000 &&
2302             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2303              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2304              tg3_flag(tp, 57765_CLASS)) &&
2305             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2306                 val = MII_TG3_DSP_TAP26_ALNOKO |
2307                       MII_TG3_DSP_TAP26_RMRXSTO;
2308                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2309                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2310         }
2311
2312         val = tr32(TG3_CPMU_EEE_MODE);
2313         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2314 }
2315
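/* Poll MII_TG3_DSP_CONTROL until bit 0x1000 (presumably the
 * macro-busy indication) clears, giving up after 100 reads.
 */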
2316 static int tg3_wait_macro_done(struct tg3 *tp)
2317 {
2318         int limit = 100;
2319
2320         while (limit--) {
2321                 u32 tmp32;
2322
2323                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2324                         if ((tmp32 & 0x1000) == 0)
2325                                 break;
2326                 }
2327         }
2328         if (limit < 0)
2329                 return -EBUSY;
2330
2331         return 0;
2332 }
2333
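/* Write a known test pattern into each of the four DSP channels (at
 * address chan * 0x2000 + 0x0200), read it back and verify it.  On a
 * macro timeout *resetp is set so the caller resets the PHY before
 * retrying.
 */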
2334 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2335 {
2336         static const u32 test_pat[4][6] = {
2337         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2338         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2339         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2340         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2341         };
2342         int chan;
2343
2344         for (chan = 0; chan < 4; chan++) {
2345                 int i;
2346
2347                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2348                              (chan * 0x2000) | 0x0200);
2349                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2350
2351                 for (i = 0; i < 6; i++)
2352                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2353                                      test_pat[chan][i]);
2354
2355                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2356                 if (tg3_wait_macro_done(tp)) {
2357                         *resetp = 1;
2358                         return -EBUSY;
2359                 }
2360
2361                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2362                              (chan * 0x2000) | 0x0200);
2363                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2364                 if (tg3_wait_macro_done(tp)) {
2365                         *resetp = 1;
2366                         return -EBUSY;
2367                 }
2368
2369                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2370                 if (tg3_wait_macro_done(tp)) {
2371                         *resetp = 1;
2372                         return -EBUSY;
2373                 }
2374
2375                 for (i = 0; i < 6; i += 2) {
2376                         u32 low, high;
2377
2378                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2379                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2380                             tg3_wait_macro_done(tp)) {
2381                                 *resetp = 1;
2382                                 return -EBUSY;
2383                         }
2384                         low &= 0x7fff;
2385                         high &= 0x000f;
2386                         if (low != test_pat[chan][i] ||
2387                             high != test_pat[chan][i+1]) {
2388                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2389                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2390                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2391
2392                                 return -EBUSY;
2393                         }
2394                 }
2395         }
2396
2397         return 0;
2398 }
2399
2400 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2401 {
2402         int chan;
2403
2404         for (chan = 0; chan < 4; chan++) {
2405                 int i;
2406
2407                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2408                              (chan * 0x2000) | 0x0200);
2409                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2410                 for (i = 0; i < 6; i++)
2411                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2412                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2413                 if (tg3_wait_macro_done(tp))
2414                         return -EBUSY;
2415         }
2416
2417         return 0;
2418 }
2419
2420 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2421 {
2422         u32 reg32, phy9_orig;
2423         int retries, do_phy_reset, err;
2424
2425         retries = 10;
2426         do_phy_reset = 1;
2427         do {
2428                 if (do_phy_reset) {
2429                         err = tg3_bmcr_reset(tp);
2430                         if (err)
2431                                 return err;
2432                         do_phy_reset = 0;
2433                 }
2434
2435                 /* Disable transmitter and interrupt.  */
2436                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2437                         continue;
2438
2439                 reg32 |= 0x3000;
2440                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2441
2442                 /* Set full-duplex, 1000 Mbps.  */
2443                 tg3_writephy(tp, MII_BMCR,
2444                              BMCR_FULLDPLX | BMCR_SPEED1000);
2445
2446                 /* Set to master mode.  */
2447                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2448                         continue;
2449
2450                 tg3_writephy(tp, MII_CTRL1000,
2451                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2452
2453                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2454                 if (err)
2455                         return err;
2456
2457                 /* Block the PHY control access.  */
2458                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2459
2460                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2461                 if (!err)
2462                         break;
2463         } while (--retries);
2464
2465         err = tg3_phy_reset_chanpat(tp);
2466         if (err)
2467                 return err;
2468
2469         tg3_phydsp_write(tp, 0x8005, 0x0000);
2470
2471         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2472         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2473
2474         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2475
2476         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2477
2478         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2479                 reg32 &= ~0x3000;
2480                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2481         } else if (!err)
2482                 err = -EBUSY;
2483
2484         return err;
2485 }
2486
2487 static void tg3_carrier_on(struct tg3 *tp)
2488 {
2489         netif_carrier_on(tp->dev);
2490         tp->link_up = true;
2491 }
2492
2493 static void tg3_carrier_off(struct tg3 *tp)
2494 {
2495         netif_carrier_off(tp->dev);
2496         tp->link_up = false;
2497 }
2498
2499 /* Unconditionally reset the tigon3 PHY and reapply the
2500  * chip-specific PHY workarounds.
2501  */
2502 static int tg3_phy_reset(struct tg3 *tp)
2503 {
2504         u32 val, cpmuctrl;
2505         int err;
2506
2507         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2508                 val = tr32(GRC_MISC_CFG);
2509                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2510                 udelay(40);
2511         }
2512         err  = tg3_readphy(tp, MII_BMSR, &val);
2513         err |= tg3_readphy(tp, MII_BMSR, &val);
2514         if (err != 0)
2515                 return -EBUSY;
2516
2517         if (netif_running(tp->dev) && tp->link_up) {
2518                 tg3_carrier_off(tp);
2519                 tg3_link_report(tp);
2520         }
2521
2522         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2523             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2524             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2525                 err = tg3_phy_reset_5703_4_5(tp);
2526                 if (err)
2527                         return err;
2528                 goto out;
2529         }
2530
2531         cpmuctrl = 0;
2532         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2533             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2534                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2535                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2536                         tw32(TG3_CPMU_CTRL,
2537                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2538         }
2539
2540         err = tg3_bmcr_reset(tp);
2541         if (err)
2542                 return err;
2543
2544         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2545                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2546                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2547
2548                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2549         }
2550
2551         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2552             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2553                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2554                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2555                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2556                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2557                         udelay(40);
2558                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2559                 }
2560         }
2561
2562         if (tg3_flag(tp, 5717_PLUS) &&
2563             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2564                 return 0;
2565
2566         tg3_phy_apply_otp(tp);
2567
2568         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2569                 tg3_phy_toggle_apd(tp, true);
2570         else
2571                 tg3_phy_toggle_apd(tp, false);
2572
2573 out:
2574         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2575             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2576                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2577                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2578                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2579         }
2580
2581         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2582                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2583                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2584         }
2585
2586         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2587                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2588                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2589                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2590                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2591                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2592                 }
2593         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2594                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2595                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2596                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2597                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2598                                 tg3_writephy(tp, MII_TG3_TEST1,
2599                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2600                         } else
2601                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2602
2603                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2604                 }
2605         }
2606
2607         /* Set the Extended packet length bit (bit 14) on all chips
2608          * that support jumbo frames. */
2609         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2610                 /* Cannot do read-modify-write on 5401 */
2611                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2612         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2613                 /* Set bit 14 with read-modify-write to preserve other bits */
2614                 err = tg3_phy_auxctl_read(tp,
2615                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2616                 if (!err)
2617                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2618                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2619         }
2620
2621         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2622          * jumbo frames transmission.
2623          */
2624         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2625                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2626                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2627                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2628         }
2629
2630         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2631                 /* adjust output voltage */
2632                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2633         }
2634
2635         tg3_phy_toggle_automdix(tp, 1);
2636         tg3_phy_set_wirespeed(tp);
2637         return 0;
2638 }
2639
2640 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2641 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2642 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2643                                           TG3_GPIO_MSG_NEED_VAUX)
2644 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2645         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2646          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2647          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2648          (TG3_GPIO_MSG_DRVR_PRES << 12))
2649
2650 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2651         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2652          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2653          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2654          (TG3_GPIO_MSG_NEED_VAUX << 12))
2655
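/* Update this function's nibble in the shared driver-status word.
 * Each PCI function owns 4 bits, placed 4 * pci_fn bits above
 * TG3_APE_GPIO_MSG_SHIFT; the word is kept in TG3_APE_GPIO_MSG on
 * 5717/5719 and in TG3_CPMU_DRV_STATUS elsewhere.
 */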
2656 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2657 {
2658         u32 status, shift;
2659
2660         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2661             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2662                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2663         else
2664                 status = tr32(TG3_CPMU_DRV_STATUS);
2665
2666         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2667         status &= ~(TG3_GPIO_MSG_MASK << shift);
2668         status |= (newstat << shift);
2669
2670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2671             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2672                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2673         else
2674                 tw32(TG3_CPMU_DRV_STATUS, status);
2675
2676         return status >> TG3_APE_GPIO_MSG_SHIFT;
2677 }
2678
2679 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2680 {
2681         if (!tg3_flag(tp, IS_NIC))
2682                 return 0;
2683
2684         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2685             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2686             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2687                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2688                         return -EIO;
2689
2690                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2691
2692                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2693                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2694
2695                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2696         } else {
2697                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2698                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2699         }
2700
2701         return 0;
2702 }
2703
2704 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2705 {
2706         u32 grc_local_ctrl;
2707
2708         if (!tg3_flag(tp, IS_NIC) ||
2709             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2710             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2711                 return;
2712
2713         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2714
2715         tw32_wait_f(GRC_LOCAL_CTRL,
2716                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2717                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2718
2719         tw32_wait_f(GRC_LOCAL_CTRL,
2720                     grc_local_ctrl,
2721                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2722
2723         tw32_wait_f(GRC_LOCAL_CTRL,
2724                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2725                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2726 }
2727
2728 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2729 {
2730         if (!tg3_flag(tp, IS_NIC))
2731                 return;
2732
2733         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2734             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2735                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2736                             (GRC_LCLCTRL_GPIO_OE0 |
2737                              GRC_LCLCTRL_GPIO_OE1 |
2738                              GRC_LCLCTRL_GPIO_OE2 |
2739                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2740                              GRC_LCLCTRL_GPIO_OUTPUT1),
2741                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2742         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2743                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2744                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2745                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2746                                      GRC_LCLCTRL_GPIO_OE1 |
2747                                      GRC_LCLCTRL_GPIO_OE2 |
2748                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2749                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2750                                      tp->grc_local_ctrl;
2751                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2752                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2753
2754                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2755                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2756                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2757
2758                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2759                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2760                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2761         } else {
2762                 u32 no_gpio2;
2763                 u32 grc_local_ctrl = 0;
2764
2765                 /* Workaround to prevent drawing excess current. */
2766                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2767                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2768                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2769                                     grc_local_ctrl,
2770                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2771                 }
2772
2773                 /* On 5753 and variants, GPIO2 cannot be used. */
2774                 no_gpio2 = tp->nic_sram_data_cfg &
2775                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2776
2777                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2778                                   GRC_LCLCTRL_GPIO_OE1 |
2779                                   GRC_LCLCTRL_GPIO_OE2 |
2780                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2781                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2782                 if (no_gpio2) {
2783                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2784                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2785                 }
2786                 tw32_wait_f(GRC_LOCAL_CTRL,
2787                             tp->grc_local_ctrl | grc_local_ctrl,
2788                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2789
2790                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2791
2792                 tw32_wait_f(GRC_LOCAL_CTRL,
2793                             tp->grc_local_ctrl | grc_local_ctrl,
2794                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2795
2796                 if (!no_gpio2) {
2797                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2798                         tw32_wait_f(GRC_LOCAL_CTRL,
2799                                     tp->grc_local_ctrl | grc_local_ctrl,
2800                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2801                 }
2802         }
2803 }
2804
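/* Decide the power source on 5717-class parts.  Under the APE GPIO
 * lock, publish whether this function needs VAUX; if any other
 * function still has a driver present, leave the power source alone,
 * otherwise switch to VAUX or VMAIN as the combined status demands.
 */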
2805 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2806 {
2807         u32 msg = 0;
2808
2809         /* Serialize power state transitions */
2810         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2811                 return;
2812
2813         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2814                 msg = TG3_GPIO_MSG_NEED_VAUX;
2815
2816         msg = tg3_set_function_status(tp, msg);
2817
2818         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2819                 goto done;
2820
2821         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2822                 tg3_pwrsrc_switch_to_vaux(tp);
2823         else
2824                 tg3_pwrsrc_die_with_vmain(tp);
2825
2826 done:
2827         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2828 }
2829
2830 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2831 {
2832         bool need_vaux = false;
2833
2834         /* The GPIOs do something completely different on 57765. */
2835         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2836                 return;
2837
2838         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2840             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2841                 tg3_frob_aux_power_5717(tp, include_wol ?
2842                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2843                 return;
2844         }
2845
2846         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2847                 struct net_device *dev_peer;
2848
2849                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2850
2851                 /* remove_one() may have been run on the peer. */
2852                 if (dev_peer) {
2853                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2854
2855                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2856                                 return;
2857
2858                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2859                             tg3_flag(tp_peer, ENABLE_ASF))
2860                                 need_vaux = true;
2861                 }
2862         }
2863
2864         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2865             tg3_flag(tp, ENABLE_ASF))
2866                 need_vaux = true;
2867
2868         if (need_vaux)
2869                 tg3_pwrsrc_switch_to_vaux(tp);
2870         else
2871                 tg3_pwrsrc_die_with_vmain(tp);
2872 }
2873
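/* Return 1 when the 5700's link polarity should be inverted for the
 * given speed: always in LED_CTRL_MODE_PHY_2, above 10 Mbps on a
 * BCM5411, and only at 10 Mbps otherwise.
 */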
2874 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2875 {
2876         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2877                 return 1;
2878         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2879                 if (speed != SPEED_10)
2880                         return 1;
2881         } else if (speed == SPEED_10)
2882                 return 1;
2883
2884         return 0;
2885 }
2886
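/* Power the PHY down ahead of a low-power transition.  Serdes, 5906
 * and FET-style PHYs each take their own path, and on a few chips the
 * final BMCR_PDOWN is skipped entirely to work around hardware bugs.
 */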
2887 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2888 {
2889         u32 val;
2890
2891         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2892                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2893                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2894                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2895
2896                         sg_dig_ctrl |=
2897                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2898                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2899                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2900                 }
2901                 return;
2902         }
2903
2904         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2905                 tg3_bmcr_reset(tp);
2906                 val = tr32(GRC_MISC_CFG);
2907                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2908                 udelay(40);
2909                 return;
2910         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2911                 u32 phytest;
2912                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2913                         u32 phy;
2914
2915                         tg3_writephy(tp, MII_ADVERTISE, 0);
2916                         tg3_writephy(tp, MII_BMCR,
2917                                      BMCR_ANENABLE | BMCR_ANRESTART);
2918
2919                         tg3_writephy(tp, MII_TG3_FET_TEST,
2920                                      phytest | MII_TG3_FET_SHADOW_EN);
2921                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2922                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2923                                 tg3_writephy(tp,
2924                                              MII_TG3_FET_SHDW_AUXMODE4,
2925                                              phy);
2926                         }
2927                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2928                 }
2929                 return;
2930         } else if (do_low_power) {
2931                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2932                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2933
2934                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2935                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2936                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2937                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2938         }
2939
2940         /* On some chips the PHY must not be powered down because of
2941          * hardware bugs; the checks below identify those chips.
2942          */
2943         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2944             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2945             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2946              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
2947             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
2948              !tp->pci_fn))
2949                 return;
2950
2951         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2952             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2953                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2954                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2955                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2956                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2957         }
2958
2959         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2960 }
2961
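     /* NVRAM access is arbitrated between the driver and firmware via the
      * hardware semaphore in NVRAM_SWARB.  The lock is reference counted;
      * the first holder requests the grant bit and polls for it for up to
      * ~160 ms (8000 * 20 usec) before giving up.
      */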
2962 /* tp->lock is held. */
2963 static int tg3_nvram_lock(struct tg3 *tp)
2964 {
2965         if (tg3_flag(tp, NVRAM)) {
2966                 int i;
2967
2968                 if (tp->nvram_lock_cnt == 0) {
2969                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2970                         for (i = 0; i < 8000; i++) {
2971                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2972                                         break;
2973                                 udelay(20);
2974                         }
2975                         if (i == 8000) {
2976                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2977                                 return -ENODEV;
2978                         }
2979                 }
2980                 tp->nvram_lock_cnt++;
2981         }
2982         return 0;
2983 }
2984
2985 /* tp->lock is held. */
2986 static void tg3_nvram_unlock(struct tg3 *tp)
2987 {
2988         if (tg3_flag(tp, NVRAM)) {
2989                 if (tp->nvram_lock_cnt > 0)
2990                         tp->nvram_lock_cnt--;
2991                 if (tp->nvram_lock_cnt == 0)
2992                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2993         }
2994 }
2995
2996 /* tp->lock is held. */
2997 static void tg3_enable_nvram_access(struct tg3 *tp)
2998 {
2999         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3000                 u32 nvaccess = tr32(NVRAM_ACCESS);
3001
3002                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3003         }
3004 }
3005
3006 /* tp->lock is held. */
3007 static void tg3_disable_nvram_access(struct tg3 *tp)
3008 {
3009         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3010                 u32 nvaccess = tr32(NVRAM_ACCESS);
3011
3012                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3013         }
3014 }
3015
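     /* Legacy SEEPROM read path, used when the NVRAM interface is absent:
      * program the word address plus READ/START into GRC_EEPROM_ADDR,
      * poll for COMPLETE (up to ~1 s), then fetch GRC_EEPROM_DATA.
      */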
3016 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3017                                         u32 offset, u32 *val)
3018 {
3019         u32 tmp;
3020         int i;
3021
3022         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3023                 return -EINVAL;
3024
3025         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3026                                         EEPROM_ADDR_DEVID_MASK |
3027                                         EEPROM_ADDR_READ);
3028         tw32(GRC_EEPROM_ADDR,
3029              tmp |
3030              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3031              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3032               EEPROM_ADDR_ADDR_MASK) |
3033              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3034
3035         for (i = 0; i < 1000; i++) {
3036                 tmp = tr32(GRC_EEPROM_ADDR);
3037
3038                 if (tmp & EEPROM_ADDR_COMPLETE)
3039                         break;
3040                 msleep(1);
3041         }
3042         if (!(tmp & EEPROM_ADDR_COMPLETE))
3043                 return -EBUSY;
3044
3045         tmp = tr32(GRC_EEPROM_DATA);
3046
3047         /*
3048          * The data will always be opposite the native endian
3049          * format.  Perform a blind byteswap to compensate.
3050          */
3051         *val = swab32(tmp);
3052
3053         return 0;
3054 }
3055
3056 #define NVRAM_CMD_TIMEOUT 10000
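     /* Each poll below waits 10 usec, so a command may take ~100 ms. */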
3057
3058 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3059 {
3060         int i;
3061
3062         tw32(NVRAM_CMD, nvram_cmd);
3063         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3064                 udelay(10);
3065                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3066                         udelay(10);
3067                         break;
3068                 }
3069         }
3070
3071         if (i == NVRAM_CMD_TIMEOUT)
3072                 return -EBUSY;
3073
3074         return 0;
3075 }
3076
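     /* Atmel AT45DB0x1B flashes use a page size that is not a power of
      * two, so a linear byte offset must be converted into the banked
      * form the chip expects: the page index shifted into the
      * page-address field, plus the offset within the page.
      */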
3077 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3078 {
3079         if (tg3_flag(tp, NVRAM) &&
3080             tg3_flag(tp, NVRAM_BUFFERED) &&
3081             tg3_flag(tp, FLASH) &&
3082             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3083             (tp->nvram_jedecnum == JEDEC_ATMEL))
3084
3085                 addr = ((addr / tp->nvram_pagesize) <<
3086                         ATMEL_AT45DB0X1B_PAGE_POS) +
3087                        (addr % tp->nvram_pagesize);
3088
3089         return addr;
3090 }
3091
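     /* The inverse of tg3_nvram_phys_addr(): convert a banked page/offset
      * address back into a linear byte offset.
      */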
3092 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3093 {
3094         if (tg3_flag(tp, NVRAM) &&
3095             tg3_flag(tp, NVRAM_BUFFERED) &&
3096             tg3_flag(tp, FLASH) &&
3097             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3098             (tp->nvram_jedecnum == JEDEC_ATMEL))
3099
3100                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3101                         tp->nvram_pagesize) +
3102                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3103
3104         return addr;
3105 }
3106
3107 /* NOTE: Data read in from NVRAM is byteswapped according to
3108  * the byteswapping settings for all other register accesses.
3109  * tg3 devices are BE devices, so on a BE machine, the data
3110  * returned will be exactly as it is seen in NVRAM.  On a LE
3111  * machine, the 32-bit value will be byteswapped.
3112  */
3113 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3114 {
3115         int ret;
3116
3117         if (!tg3_flag(tp, NVRAM))
3118                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3119
3120         offset = tg3_nvram_phys_addr(tp, offset);
3121
3122         if (offset > NVRAM_ADDR_MSK)
3123                 return -EINVAL;
3124
3125         ret = tg3_nvram_lock(tp);
3126         if (ret)
3127                 return ret;
3128
3129         tg3_enable_nvram_access(tp);
3130
3131         tw32(NVRAM_ADDR, offset);
3132         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3133                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3134
3135         if (ret == 0)
3136                 *val = tr32(NVRAM_RDDATA);
3137
3138         tg3_disable_nvram_access(tp);
3139
3140         tg3_nvram_unlock(tp);
3141
3142         return ret;
3143 }
3144
3145 /* Ensures NVRAM data is in bytestream format. */
3146 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3147 {
3148         u32 v;
3149         int res = tg3_nvram_read(tp, offset, &v);
3150         if (!res)
3151                 *val = cpu_to_be32(v);
3152         return res;
3153 }
3154
3155 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3156                                     u32 offset, u32 len, u8 *buf)
3157 {
3158         int i, j, rc = 0;
3159         u32 val;
3160
3161         for (i = 0; i < len; i += 4) {
3162                 u32 addr;
3163                 __be32 data;
3164
3165                 addr = offset + i;
3166
3167                 memcpy(&data, buf + i, 4);
3168
3169                 /*
3170                  * The SEEPROM interface expects the data to always be opposite
3171                  * the native endian format.  We accomplish this by reversing
3172                  * all the operations that would have been performed on the
3173                  * data from a call to tg3_nvram_read_be32().
3174                  */
3175                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3176
3177                 val = tr32(GRC_EEPROM_ADDR);
3178                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3179
3180                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3181                         EEPROM_ADDR_READ);
3182                 tw32(GRC_EEPROM_ADDR, val |
3183                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3184                         (addr & EEPROM_ADDR_ADDR_MASK) |
3185                         EEPROM_ADDR_START |
3186                         EEPROM_ADDR_WRITE);
3187
3188                 for (j = 0; j < 1000; j++) {
3189                         val = tr32(GRC_EEPROM_ADDR);
3190
3191                         if (val & EEPROM_ADDR_COMPLETE)
3192                                 break;
3193                         msleep(1);
3194                 }
3195                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3196                         rc = -EBUSY;
3197                         break;
3198                 }
3199         }
3200
3201         return rc;
3202 }
3203
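     /* Unbuffered flash parts can only be programmed a full page at a
      * time, so each page touched is read back in its entirety, merged
      * with the new data, erased, and then rewritten word by word.
      */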
3204 /* offset and length are dword aligned */
3205 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3206                 u8 *buf)
3207 {
3208         int ret = 0;
3209         u32 pagesize = tp->nvram_pagesize;
3210         u32 pagemask = pagesize - 1;
3211         u32 nvram_cmd;
3212         u8 *tmp;
3213
3214         tmp = kmalloc(pagesize, GFP_KERNEL);
3215         if (tmp == NULL)
3216                 return -ENOMEM;
3217
3218         while (len) {
3219                 int j;
3220                 u32 phy_addr, page_off, size;
3221
3222                 phy_addr = offset & ~pagemask;
3223
3224                 for (j = 0; j < pagesize; j += 4) {
3225                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3226                                                   (__be32 *) (tmp + j));
3227                         if (ret)
3228                                 break;
3229                 }
3230                 if (ret)
3231                         break;
3232
3233                 page_off = offset & pagemask;
3234                 size = pagesize;
3235                 if (len < size)
3236                         size = len;
3237
3238                 len -= size;
3239
3240                 memcpy(tmp + page_off, buf, size);
3241
3242                 offset = offset + (pagesize - page_off);
3243
3244                 tg3_enable_nvram_access(tp);
3245
3246                 /*
3247                  * Before we can erase the flash page, we need
3248                  * to issue a special "write enable" command.
3249                  */
3250                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3251
3252                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3253                         break;
3254
3255                 /* Erase the target page */
3256                 tw32(NVRAM_ADDR, phy_addr);
3257
3258                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3259                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3260
3261                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3262                         break;
3263
3264                 /* Issue another write enable to start the write. */
3265                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3266
3267                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3268                         break;
3269
3270                 for (j = 0; j < pagesize; j += 4) {
3271                         __be32 data;
3272
3273                         data = *((__be32 *) (tmp + j));
3274
3275                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3276
3277                         tw32(NVRAM_ADDR, phy_addr + j);
3278
3279                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3280                                 NVRAM_CMD_WR;
3281
3282                         if (j == 0)
3283                                 nvram_cmd |= NVRAM_CMD_FIRST;
3284                         else if (j == (pagesize - 4))
3285                                 nvram_cmd |= NVRAM_CMD_LAST;
3286
3287                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3288                         if (ret)
3289                                 break;
3290                 }
3291                 if (ret)
3292                         break;
3293         }
3294
3295         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3296         tg3_nvram_exec_cmd(tp, nvram_cmd);
3297
3298         kfree(tmp);
3299
3300         return ret;
3301 }
3302
3303 /* offset and length are dword aligned */
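     /* Buffered parts accept streaming writes: NVRAM_CMD_FIRST starts a
      * page (or the whole transfer) and NVRAM_CMD_LAST closes it, with no
      * explicit erase cycle.  ST flashes additionally need a write-enable
      * command before each page.
      */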
3304 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3305                 u8 *buf)
3306 {
3307         int i, ret = 0;
3308
3309         for (i = 0; i < len; i += 4, offset += 4) {
3310                 u32 page_off, phy_addr, nvram_cmd;
3311                 __be32 data;
3312
3313                 memcpy(&data, buf + i, 4);
3314                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3315
3316                 page_off = offset % tp->nvram_pagesize;
3317
3318                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3319
3320                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3321
3322                 if (page_off == 0 || i == 0)
3323                         nvram_cmd |= NVRAM_CMD_FIRST;
3324                 if (page_off == (tp->nvram_pagesize - 4))
3325                         nvram_cmd |= NVRAM_CMD_LAST;
3326
3327                 if (i == (len - 4))
3328                         nvram_cmd |= NVRAM_CMD_LAST;
3329
3330                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3331                     !tg3_flag(tp, FLASH) ||
3332                     !tg3_flag(tp, 57765_PLUS))
3333                         tw32(NVRAM_ADDR, phy_addr);
3334
3335                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3336                     !tg3_flag(tp, 5755_PLUS) &&
3337                     (tp->nvram_jedecnum == JEDEC_ST) &&
3338                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3339                         u32 cmd;
3340
3341                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3342                         ret = tg3_nvram_exec_cmd(tp, cmd);
3343                         if (ret)
3344                                 break;
3345                 }
3346                 if (!tg3_flag(tp, FLASH)) {
3347                         /* We always do complete word writes to eeprom. */
3348                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3349                 }
3350
3351                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3352                 if (ret)
3353                         break;
3354         }
3355         return ret;
3356 }
3357
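     /* Top-level NVRAM write: temporarily drop the GPIO-based write
      * protect, then dispatch to the SEEPROM, buffered, or unbuffered
      * path under the NVRAM lock with write access enabled.
      */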
3358 /* offset and length are dword aligned */
3359 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3360 {
3361         int ret;
3362
3363         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3364                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3365                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3366                 udelay(40);
3367         }
3368
3369         if (!tg3_flag(tp, NVRAM)) {
3370                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3371         } else {
3372                 u32 grc_mode;
3373
3374                 ret = tg3_nvram_lock(tp);
3375                 if (ret)
3376                         return ret;
3377
3378                 tg3_enable_nvram_access(tp);
3379                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3380                         tw32(NVRAM_WRITE1, 0x406);
3381
3382                 grc_mode = tr32(GRC_MODE);
3383                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3384
3385                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3386                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3387                                 buf);
3388                 } else {
3389                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3390                                 buf);
3391                 }
3392
3393                 grc_mode = tr32(GRC_MODE);
3394                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3395
3396                 tg3_disable_nvram_access(tp);
3397                 tg3_nvram_unlock(tp);
3398         }
3399
3400         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3401                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3402                 udelay(40);
3403         }
3404
3405         return ret;
3406 }
3407
3408 #define RX_CPU_SCRATCH_BASE     0x30000
3409 #define RX_CPU_SCRATCH_SIZE     0x04000
3410 #define TX_CPU_SCRATCH_BASE     0x34000
3411 #define TX_CPU_SCRATCH_SIZE     0x04000
3412
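     /* Writing CPU_MODE_HALT may not take effect immediately, so the bit
      * is rewritten and re-read up to 10000 times.  The 5906 uses a
      * virtual CPU that is halted through GRC_VCPU_EXT_CTRL instead.
      */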
3413 /* tp->lock is held. */
3414 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3415 {
3416         int i;
3417
3418         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3419
3420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3421                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3422
3423                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3424                 return 0;
3425         }
3426         if (offset == RX_CPU_BASE) {
3427                 for (i = 0; i < 10000; i++) {
3428                         tw32(offset + CPU_STATE, 0xffffffff);
3429                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3430                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3431                                 break;
3432                 }
3433
3434                 tw32(offset + CPU_STATE, 0xffffffff);
3435                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3436                 udelay(10);
3437         } else {
3438                 for (i = 0; i < 10000; i++) {
3439                         tw32(offset + CPU_STATE, 0xffffffff);
3440                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3441                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3442                                 break;
3443                 }
3444         }
3445
3446         if (i >= 10000) {
3447                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3448                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3449                 return -ENODEV;
3450         }
3451
3452         /* Clear firmware's nvram arbitration. */
3453         if (tg3_flag(tp, NVRAM))
3454                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3455         return 0;
3456 }
3457
3458 struct fw_info {
3459         unsigned int fw_base;
3460         unsigned int fw_len;
3461         const __be32 *fw_data;
3462 };
3463
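     /* Copy a firmware image into a CPU's scratch memory: zero the whole
      * scratch area, make sure the CPU stays halted, then write the
      * big-endian image words at fw_base's offset within the scratch
      * window.  5705+ parts must use tg3_write_mem() for this.
      */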
3464 /* tp->lock is held. */
3465 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3466                                  u32 cpu_scratch_base, int cpu_scratch_size,
3467                                  struct fw_info *info)
3468 {
3469         int err, lock_err, i;
3470         void (*write_op)(struct tg3 *, u32, u32);
3471
3472         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3473                 netdev_err(tp->dev,
3474                            "%s: attempt to load TX cpu firmware on a 5705-plus device, which has no TX cpu\n",
3475                            __func__);
3476                 return -EINVAL;
3477         }
3478
3479         if (tg3_flag(tp, 5705_PLUS))
3480                 write_op = tg3_write_mem;
3481         else
3482                 write_op = tg3_write_indirect_reg32;
3483
3484         /* Bootcode may still be loading at this point.  Take the
3485          * NVRAM lock before halting the CPU so we do not race with it.
3486          */
3487         lock_err = tg3_nvram_lock(tp);
3488         err = tg3_halt_cpu(tp, cpu_base);
3489         if (!lock_err)
3490                 tg3_nvram_unlock(tp);
3491         if (err)
3492                 goto out;
3493
3494         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3495                 write_op(tp, cpu_scratch_base + i, 0);
3496         tw32(cpu_base + CPU_STATE, 0xffffffff);
3497         tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3498         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3499                 write_op(tp, (cpu_scratch_base +
3500                               (info->fw_base & 0xffff) +
3501                               (i * sizeof(u32))),
3502                               be32_to_cpu(info->fw_data[i]));
3503
3504         err = 0;
3505
3506 out:
3507         return err;
3508 }
3509
3510 /* tp->lock is held. */
3511 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3512 {
3513         struct fw_info info;
3514         const __be32 *fw_data;
3515         int err, i;
3516
3517         fw_data = (void *)tp->fw->data;
3518
3519         /* The firmware blob starts with version numbers, followed by
3520          * the start address and the length.  We use the complete length:
3521          * length = end_address_of_bss - start_address_of_text.  The
3522          * remainder is the image, loaded contiguously from the start
3523          * address. */
3524
3525         info.fw_base = be32_to_cpu(fw_data[1]);
3526         info.fw_len = tp->fw->size - 12;
3527         info.fw_data = &fw_data[3];
3528
3529         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3530                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3531                                     &info);
3532         if (err)
3533                 return err;
3534
3535         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3536                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3537                                     &info);
3538         if (err)
3539                 return err;
3540
3541         /* Now start up only the RX CPU. */
3542         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3543         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3544
3545         for (i = 0; i < 5; i++) {
3546                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3547                         break;
3548                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3549                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3550                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3551                 udelay(1000);
3552         }
3553         if (i >= 5) {
3554                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3555                            "should be %08x\n", __func__,
3556                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3557                 return -ENODEV;
3558         }
3559         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3560         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3561
3562         return 0;
3563 }
3564
3565 /* tp->lock is held. */
3566 static int tg3_load_tso_firmware(struct tg3 *tp)
3567 {
3568         struct fw_info info;
3569         const __be32 *fw_data;
3570         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3571         int err, i;
3572
3573         if (tg3_flag(tp, HW_TSO_1) ||
3574             tg3_flag(tp, HW_TSO_2) ||
3575             tg3_flag(tp, HW_TSO_3))
3576                 return 0;
3577
3578         fw_data = (void *)tp->fw->data;
3579
3580         /* The firmware blob starts with version numbers, followed by
3581          * the start address and the length.  We use the complete length:
3582          * length = end_address_of_bss - start_address_of_text.  The
3583          * remainder is the image, loaded contiguously from the start
3584          * address. */
3585
3586         info.fw_base = be32_to_cpu(fw_data[1]);
3587         cpu_scratch_size = tp->fw_len;
3588         info.fw_len = tp->fw->size - 12;
3589         info.fw_data = &fw_data[3];
3590
3591         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3592                 cpu_base = RX_CPU_BASE;
3593                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3594         } else {
3595                 cpu_base = TX_CPU_BASE;
3596                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3597                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3598         }
3599
3600         err = tg3_load_firmware_cpu(tp, cpu_base,
3601                                     cpu_scratch_base, cpu_scratch_size,
3602                                     &info);
3603         if (err)
3604                 return err;
3605
3606         /* Now start up the CPU. */
3607         tw32(cpu_base + CPU_STATE, 0xffffffff);
3608         tw32_f(cpu_base + CPU_PC, info.fw_base);
3609
3610         for (i = 0; i < 5; i++) {
3611                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3612                         break;
3613                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3614                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3615                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3616                 udelay(1000);
3617         }
3618         if (i >= 5) {
3619                 netdev_err(tp->dev,
3620                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3621                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3622                 return -ENODEV;
3623         }
3624         tw32(cpu_base + CPU_STATE, 0xffffffff);
3625         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3626         return 0;
3627 }
3628
3629
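     /* Program the station address into all four MAC address slots (and
      * the twelve extended slots on 5703/5704), then seed the TX backoff
      * generator from the byte sum of the address.
      */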
3630 /* tp->lock is held. */
3631 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3632 {
3633         u32 addr_high, addr_low;
3634         int i;
3635
3636         addr_high = ((tp->dev->dev_addr[0] << 8) |
3637                      tp->dev->dev_addr[1]);
3638         addr_low = ((tp->dev->dev_addr[2] << 24) |
3639                     (tp->dev->dev_addr[3] << 16) |
3640                     (tp->dev->dev_addr[4] <<  8) |
3641                     (tp->dev->dev_addr[5] <<  0));
3642         for (i = 0; i < 4; i++) {
3643                 if (i == 1 && skip_mac_1)
3644                         continue;
3645                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3646                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3647         }
3648
3649         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3650             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3651                 for (i = 0; i < 12; i++) {
3652                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3653                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3654                 }
3655         }
3656
3657         addr_high = (tp->dev->dev_addr[0] +
3658                      tp->dev->dev_addr[1] +
3659                      tp->dev->dev_addr[2] +
3660                      tp->dev->dev_addr[3] +
3661                      tp->dev->dev_addr[4] +
3662                      tp->dev->dev_addr[5]) &
3663                 TX_BACKOFF_SEED_MASK;
3664         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3665 }
3666
3667 static void tg3_enable_register_access(struct tg3 *tp)
3668 {
3669         /*
3670          * Make sure register accesses (indirect or otherwise) will function
3671          * correctly.
3672          */
3673         pci_write_config_dword(tp->pdev,
3674                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3675 }
3676
3677 static int tg3_power_up(struct tg3 *tp)
3678 {
3679         int err;
3680
3681         tg3_enable_register_access(tp);
3682
3683         err = pci_set_power_state(tp->pdev, PCI_D0);
3684         if (!err) {
3685                 /* Switch out of Vaux if the device is a NIC */
3686                 tg3_pwrsrc_switch_to_vmain(tp);
3687         } else {
3688                 netdev_err(tp->dev, "Transition to D0 failed\n");
3689         }
3690
3691         return err;
3692 }
3693
3694 static int tg3_setup_phy(struct tg3 *, int);
3695
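     /* Quiesce the device before entering a low-power state: mask PCI
      * interrupts, drop the PHY into a low-power/WoL configuration, arm
      * the MAC for magic-packet wake if needed, gate whatever clocks the
      * chip allows, and finally switch over to auxiliary power.
      */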
3696 static int tg3_power_down_prepare(struct tg3 *tp)
3697 {
3698         u32 misc_host_ctrl;
3699         bool device_should_wake, do_low_power;
3700
3701         tg3_enable_register_access(tp);
3702
3703         /* Restore the CLKREQ setting. */
3704         if (tg3_flag(tp, CLKREQ_BUG))
3705                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3706                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3707
3708         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3709         tw32(TG3PCI_MISC_HOST_CTRL,
3710              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3711
3712         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3713                              tg3_flag(tp, WOL_ENABLE);
3714
3715         if (tg3_flag(tp, USE_PHYLIB)) {
3716                 do_low_power = false;
3717                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3718                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3719                         struct phy_device *phydev;
3720                         u32 phyid, advertising;
3721
3722                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3723
3724                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3725
3726                         tp->link_config.speed = phydev->speed;
3727                         tp->link_config.duplex = phydev->duplex;
3728                         tp->link_config.autoneg = phydev->autoneg;
3729                         tp->link_config.advertising = phydev->advertising;
3730
3731                         advertising = ADVERTISED_TP |
3732                                       ADVERTISED_Pause |
3733                                       ADVERTISED_Autoneg |
3734                                       ADVERTISED_10baseT_Half;
3735
3736                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3737                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3738                                         advertising |=
3739                                                 ADVERTISED_100baseT_Half |
3740                                                 ADVERTISED_100baseT_Full |
3741                                                 ADVERTISED_10baseT_Full;
3742                                 else
3743                                         advertising |= ADVERTISED_10baseT_Full;
3744                         }
3745
3746                         phydev->advertising = advertising;
3747
3748                         phy_start_aneg(phydev);
3749
3750                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3751                         if (phyid != PHY_ID_BCMAC131) {
3752                                 phyid &= PHY_BCM_OUI_MASK;
3753                                 if (phyid == PHY_BCM_OUI_1 ||
3754                                     phyid == PHY_BCM_OUI_2 ||
3755                                     phyid == PHY_BCM_OUI_3)
3756                                         do_low_power = true;
3757                         }
3758                 }
3759         } else {
3760                 do_low_power = true;
3761
3762                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3763                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3764
3765                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3766                         tg3_setup_phy(tp, 0);
3767         }
3768
3769         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3770                 u32 val;
3771
3772                 val = tr32(GRC_VCPU_EXT_CTRL);
3773                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3774         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3775                 int i;
3776                 u32 val;
3777
3778                 for (i = 0; i < 200; i++) {
3779                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3780                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3781                                 break;
3782                         msleep(1);
3783                 }
3784         }
3785         if (tg3_flag(tp, WOL_CAP))
3786                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3787                                                      WOL_DRV_STATE_SHUTDOWN |
3788                                                      WOL_DRV_WOL |
3789                                                      WOL_SET_MAGIC_PKT);
3790
3791         if (device_should_wake) {
3792                 u32 mac_mode;
3793
3794                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3795                         if (do_low_power &&
3796                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3797                                 tg3_phy_auxctl_write(tp,
3798                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3799                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3800                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3801                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3802                                 udelay(40);
3803                         }
3804
3805                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3806                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3807                         else
3808                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3809
3810                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3811                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3812                             ASIC_REV_5700) {
3813                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3814                                              SPEED_100 : SPEED_10;
3815                                 if (tg3_5700_link_polarity(tp, speed))
3816                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3817                                 else
3818                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3819                         }
3820                 } else {
3821                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3822                 }
3823
3824                 if (!tg3_flag(tp, 5750_PLUS))
3825                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3826
3827                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3828                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3829                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3830                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3831
3832                 if (tg3_flag(tp, ENABLE_APE))
3833                         mac_mode |= MAC_MODE_APE_TX_EN |
3834                                     MAC_MODE_APE_RX_EN |
3835                                     MAC_MODE_TDE_ENABLE;
3836
3837                 tw32_f(MAC_MODE, mac_mode);
3838                 udelay(100);
3839
3840                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3841                 udelay(10);
3842         }
3843
3844         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3845             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3846              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3847                 u32 base_val;
3848
3849                 base_val = tp->pci_clock_ctrl;
3850                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3851                              CLOCK_CTRL_TXCLK_DISABLE);
3852
3853                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3854                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3855         } else if (tg3_flag(tp, 5780_CLASS) ||
3856                    tg3_flag(tp, CPMU_PRESENT) ||
3857                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3858                 /* do nothing */
3859         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3860                 u32 newbits1, newbits2;
3861
3862                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3863                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3864                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3865                                     CLOCK_CTRL_TXCLK_DISABLE |
3866                                     CLOCK_CTRL_ALTCLK);
3867                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3868                 } else if (tg3_flag(tp, 5705_PLUS)) {
3869                         newbits1 = CLOCK_CTRL_625_CORE;
3870                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3871                 } else {
3872                         newbits1 = CLOCK_CTRL_ALTCLK;
3873                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3874                 }
3875
3876                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3877                             40);
3878
3879                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3880                             40);
3881
3882                 if (!tg3_flag(tp, 5705_PLUS)) {
3883                         u32 newbits3;
3884
3885                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3886                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3887                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3888                                             CLOCK_CTRL_TXCLK_DISABLE |
3889                                             CLOCK_CTRL_44MHZ_CORE);
3890                         } else {
3891                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3892                         }
3893
3894                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3895                                     tp->pci_clock_ctrl | newbits3, 40);
3896                 }
3897         }
3898
3899         if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
3900                 tg3_power_down_phy(tp, do_low_power);
3901
3902         tg3_frob_aux_power(tp, true);
3903
3904         /* Workaround for unstable PLL clock */
3905         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3906             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3907                 u32 val = tr32(0x7d00);
3908
3909                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3910                 tw32(0x7d00, val);
3911                 if (!tg3_flag(tp, ENABLE_ASF)) {
3912                         int err;
3913
3914                         err = tg3_nvram_lock(tp);
3915                         tg3_halt_cpu(tp, RX_CPU_BASE);
3916                         if (!err)
3917                                 tg3_nvram_unlock(tp);
3918                 }
3919         }
3920
3921         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3922
3923         return 0;
3924 }
3925
3926 static void tg3_power_down(struct tg3 *tp)
3927 {
3928         tg3_power_down_prepare(tp);
3929
3930         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3931         pci_set_power_state(tp->pdev, PCI_D3hot);
3932 }
3933
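     /* Decode the PHY's auxiliary status register into speed and duplex.
      * FET PHYs encode these differently, with separate 100 Mbps and
      * full-duplex bits.
      */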
3934 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val,
                                              u16 *speed, u8 *duplex)
3935 {
3936         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3937         case MII_TG3_AUX_STAT_10HALF:
3938                 *speed = SPEED_10;
3939                 *duplex = DUPLEX_HALF;
3940                 break;
3941
3942         case MII_TG3_AUX_STAT_10FULL:
3943                 *speed = SPEED_10;
3944                 *duplex = DUPLEX_FULL;
3945                 break;
3946
3947         case MII_TG3_AUX_STAT_100HALF:
3948                 *speed = SPEED_100;
3949                 *duplex = DUPLEX_HALF;
3950                 break;
3951
3952         case MII_TG3_AUX_STAT_100FULL:
3953                 *speed = SPEED_100;
3954                 *duplex = DUPLEX_FULL;
3955                 break;
3956
3957         case MII_TG3_AUX_STAT_1000HALF:
3958                 *speed = SPEED_1000;
3959                 *duplex = DUPLEX_HALF;
3960                 break;
3961
3962         case MII_TG3_AUX_STAT_1000FULL:
3963                 *speed = SPEED_1000;
3964                 *duplex = DUPLEX_FULL;
3965                 break;
3966
3967         default:
3968                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3969                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3970                                  SPEED_10;
3971                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3972                                   DUPLEX_HALF;
3973                         break;
3974                 }
3975                 *speed = SPEED_UNKNOWN;
3976                 *duplex = DUPLEX_UNKNOWN;
3977                 break;
3978         }
3979 }
3980
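     /* Translate ethtool advertisement and flow-control masks into the
      * MII_ADVERTISE and MII_CTRL1000 registers, then set up any EEE
      * advertisement through the clause-45 MDIO_MMD_AN device.
      */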
3981 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3982 {
3983         int err = 0;
3984         u32 val, new_adv;
3985
3986         new_adv = ADVERTISE_CSMA;
3987         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3988         new_adv |= mii_advertise_flowctrl(flowctrl);
3989
3990         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3991         if (err)
3992                 goto done;
3993
3994         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3995                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3996
3997                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3998                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3999                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4000
4001                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4002                 if (err)
4003                         goto done;
4004         }
4005
4006         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4007                 goto done;
4008
4009         tw32(TG3_CPMU_EEE_MODE,
4010              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4011
4012         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
4013         if (!err) {
4014                 u32 err2;
4015
4016                 val = 0;
4017                 /* Advertise 100-BaseTX EEE ability */
4018                 if (advertise & ADVERTISED_100baseT_Full)
4019                         val |= MDIO_AN_EEE_ADV_100TX;
4020                 /* Advertise 1000-BaseT EEE ability */
4021                 if (advertise & ADVERTISED_1000baseT_Full)
4022                         val |= MDIO_AN_EEE_ADV_1000T;
4023                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4024                 if (err)
4025                         val = 0;
4026
4027                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
4028                 case ASIC_REV_5717:
4029                 case ASIC_REV_57765:
4030                 case ASIC_REV_57766:
4031                 case ASIC_REV_5719:
4032                         /* If any EEE modes were advertised above... */
4033                         if (val)
4034                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4035                                       MII_TG3_DSP_TAP26_RMRXSTO |
4036                                       MII_TG3_DSP_TAP26_OPCSINPT;
4037                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4038                         /* Fall through */
4039                 case ASIC_REV_5720:
4040                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4041                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4042                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4043                 }
4044
4045                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
4046                 if (!err)
4047                         err = err2;
4048         }
4049
4050 done:
4051         return err;
4052 }
4053
4054 static void tg3_phy_copper_begin(struct tg3 *tp)
4055 {
4056         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4057             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4058                 u32 adv, fc;
4059
4060                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
4061                         adv = ADVERTISED_10baseT_Half |
4062                               ADVERTISED_10baseT_Full;
4063                         if (tg3_flag(tp, WOL_SPEED_100MB))
4064                                 adv |= ADVERTISED_100baseT_Half |
4065                                        ADVERTISED_100baseT_Full;
4066
4067                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4068                 } else {
4069                         adv = tp->link_config.advertising;
4070                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4071                                 adv &= ~(ADVERTISED_1000baseT_Half |
4072                                          ADVERTISED_1000baseT_Full);
4073
4074                         fc = tp->link_config.flowctrl;
4075                 }
4076
4077                 tg3_phy_autoneg_cfg(tp, adv, fc);
4078
4079                 tg3_writephy(tp, MII_BMCR,
4080                              BMCR_ANENABLE | BMCR_ANRESTART);
4081         } else {
4082                 int i;
4083                 u32 bmcr, orig_bmcr;
4084
4085                 tp->link_config.active_speed = tp->link_config.speed;
4086                 tp->link_config.active_duplex = tp->link_config.duplex;
4087
4088                 bmcr = 0;
4089                 switch (tp->link_config.speed) {
4090                 default:
4091                 case SPEED_10:
4092                         break;
4093
4094                 case SPEED_100:
4095                         bmcr |= BMCR_SPEED100;
4096                         break;
4097
4098                 case SPEED_1000:
4099                         bmcr |= BMCR_SPEED1000;
4100                         break;
4101                 }
4102
4103                 if (tp->link_config.duplex == DUPLEX_FULL)
4104                         bmcr |= BMCR_FULLDPLX;
4105
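                     /* When forcing speed/duplex, first drop the link by
                      * putting the PHY in loopback and waiting (up to
                      * 15 ms) for BMSR to report link down, then apply
                      * the new BMCR value.
                      */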
4106                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4107                     (bmcr != orig_bmcr)) {
4108                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4109                         for (i = 0; i < 1500; i++) {
4110                                 u32 tmp;
4111
4112                                 udelay(10);
4113                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4114                                     tg3_readphy(tp, MII_BMSR, &tmp))
4115                                         continue;
4116                                 if (!(tmp & BMSR_LSTATUS)) {
4117                                         udelay(40);
4118                                         break;
4119                                 }
4120                         }
4121                         tg3_writephy(tp, MII_BMCR, bmcr);
4122                         udelay(40);
4123                 }
4124         }
4125 }
4126
4127 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4128 {
4129         int err;
4130
4131         /* Turn off tap power management. */
4132         /* Set Extended packet length bit */
4133         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4134
4135         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4136         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4137         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4138         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4139         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4140
4141         udelay(40);
4142
4143         return err;
4144 }
4145
4146 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4147 {
4148         u32 advmsk, tgtadv, advertising;
4149
4150         advertising = tp->link_config.advertising;
4151         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4152
4153         advmsk = ADVERTISE_ALL;
4154         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4155                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4156                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4157         }
4158
4159         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4160                 return false;
4161
4162         if ((*lcladv & advmsk) != tgtadv)
4163                 return false;
4164
4165         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4166                 u32 tg3_ctrl;
4167
4168                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4169
4170                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4171                         return false;
4172
4173                 if (tgtadv &&
4174                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4175                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4176                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4177                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4178                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4179                 } else {
4180                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4181                 }
4182
4183                 if (tg3_ctrl != tgtadv)
4184                         return false;
4185         }
4186
4187         return true;
4188 }
4189
4190 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4191 {
4192         u32 lpeth = 0;
4193
4194         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4195                 u32 val;
4196
4197                 if (tg3_readphy(tp, MII_STAT1000, &val))
4198                         return false;
4199
4200                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4201         }
4202
4203         if (tg3_readphy(tp, MII_LPA, rmtadv))
4204                 return false;
4205
4206         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4207         tp->link_config.rmt_adv = lpeth;
4208
4209         return true;
4210 }
4211
4212 static bool tg3_test_and_report_link_chg(struct tg3 *tp, int curr_link_up)
4213 {
4214         if (curr_link_up != tp->link_up) {
4215                 if (curr_link_up) {
4216                         tg3_carrier_on(tp);
4217                 } else {
4218                         tg3_carrier_off(tp);
4219                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4220                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4221                 }
4222
4223                 tg3_link_report(tp);
4224                 return true;
4225         }
4226
4227         return false;
4228 }
4229
4230 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4231 {
4232         int current_link_up;
4233         u32 bmsr, val;
4234         u32 lcl_adv, rmt_adv;
4235         u16 current_speed;
4236         u8 current_duplex;
4237         int i, err;
4238
4239         tw32(MAC_EVENT, 0);
4240
4241         tw32_f(MAC_STATUS,
4242              (MAC_STATUS_SYNC_CHANGED |
4243               MAC_STATUS_CFG_CHANGED |
4244               MAC_STATUS_MI_COMPLETION |
4245               MAC_STATUS_LNKSTATE_CHANGED));
4246         udelay(40);
4247
4248         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4249                 tw32_f(MAC_MI_MODE,
4250                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4251                 udelay(80);
4252         }
4253
4254         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4255
4256         /* Some third-party PHYs need to be reset when the link goes
4257          * down.
4258          */
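         /* MII_BMSR latches link-down events, so it is read twice here
          * (and elsewhere below): the first read clears any latched
          * status and the second reflects the current link state.
          */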
4259         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4260              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4261              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4262             tp->link_up) {
4263                 tg3_readphy(tp, MII_BMSR, &bmsr);
4264                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4265                     !(bmsr & BMSR_LSTATUS))
4266                         force_reset = 1;
4267         }
4268         if (force_reset)
4269                 tg3_phy_reset(tp);
4270
4271         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4272                 tg3_readphy(tp, MII_BMSR, &bmsr);
4273                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4274                     !tg3_flag(tp, INIT_COMPLETE))
4275                         bmsr = 0;
4276
4277                 if (!(bmsr & BMSR_LSTATUS)) {
4278                         err = tg3_init_5401phy_dsp(tp);
4279                         if (err)
4280                                 return err;
4281
4282                         tg3_readphy(tp, MII_BMSR, &bmsr);
4283                         for (i = 0; i < 1000; i++) {
4284                                 udelay(10);
4285                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4286                                     (bmsr & BMSR_LSTATUS)) {
4287                                         udelay(40);
4288                                         break;
4289                                 }
4290                         }
4291
4292                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4293                             TG3_PHY_REV_BCM5401_B0 &&
4294                             !(bmsr & BMSR_LSTATUS) &&
4295                             tp->link_config.active_speed == SPEED_1000) {
4296                                 err = tg3_phy_reset(tp);
4297                                 if (!err)
4298                                         err = tg3_init_5401phy_dsp(tp);
4299                                 if (err)
4300                                         return err;
4301                         }
4302                 }
4303         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4304                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4305                 /* 5701 {A0,B0} CRC bug workaround */
4306                 tg3_writephy(tp, 0x15, 0x0a75);
4307                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4308                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4309                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4310         }
4311
4312         /* Clear pending interrupts... */
4313         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4314         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4315
4316         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4317                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4318         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4319                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4320
4321         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4323                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4324                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4325                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4326                 else
4327                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4328         }
4329
4330         current_link_up = 0;
4331         current_speed = SPEED_UNKNOWN;
4332         current_duplex = DUPLEX_UNKNOWN;
4333         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4334         tp->link_config.rmt_adv = 0;
4335
4336         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4337                 err = tg3_phy_auxctl_read(tp,
4338                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4339                                           &val);
4340                 if (!err && !(val & (1 << 10))) {
4341                         tg3_phy_auxctl_write(tp,
4342                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4343                                              val | (1 << 10));
4344                         goto relink;
4345                 }
4346         }
4347
4348         bmsr = 0;
4349         for (i = 0; i < 100; i++) {
4350                 tg3_readphy(tp, MII_BMSR, &bmsr);
4351                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4352                     (bmsr & BMSR_LSTATUS))
4353                         break;
4354                 udelay(40);
4355         }
4356
4357         if (bmsr & BMSR_LSTATUS) {
4358                 u32 aux_stat, bmcr;
4359
4360                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4361                 for (i = 0; i < 2000; i++) {
4362                         udelay(10);
4363                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4364                             aux_stat)
4365                                 break;
4366                 }
4367
4368                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4369                                              &current_speed,
4370                                              &current_duplex);
4371
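                /* Poll BMCR for up to 2 ms; reads of 0 or 0x7fff are
                 * treated as bogus and retried.
                 */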
4372                 bmcr = 0;
4373                 for (i = 0; i < 200; i++) {
4374                         tg3_readphy(tp, MII_BMCR, &bmcr);
4375                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4376                                 continue;
4377                         if (bmcr && bmcr != 0x7fff)
4378                                 break;
4379                         udelay(10);
4380                 }
4381
4382                 lcl_adv = 0;
4383                 rmt_adv = 0;
4384
4385                 tp->link_config.active_speed = current_speed;
4386                 tp->link_config.active_duplex = current_duplex;
4387
4388                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4389                         if ((bmcr & BMCR_ANENABLE) &&
4390                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4391                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4392                                 current_link_up = 1;
4393                 } else {
4394                         if (!(bmcr & BMCR_ANENABLE) &&
4395                             tp->link_config.speed == current_speed &&
4396                             tp->link_config.duplex == current_duplex &&
4397                             tp->link_config.flowctrl ==
4398                             tp->link_config.active_flowctrl) {
4399                                 current_link_up = 1;
4400                         }
4401                 }
4402
4403                 if (current_link_up == 1 &&
4404                     tp->link_config.active_duplex == DUPLEX_FULL) {
4405                         u32 reg, bit;
4406
4407                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4408                                 reg = MII_TG3_FET_GEN_STAT;
4409                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4410                         } else {
4411                                 reg = MII_TG3_EXT_STAT;
4412                                 bit = MII_TG3_EXT_STAT_MDIX;
4413                         }
4414
4415                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4416                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4417
4418                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4419                 }
4420         }
4421
4422 relink:
4423         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4424                 tg3_phy_copper_begin(tp);
4425
4426                 tg3_readphy(tp, MII_BMSR, &bmsr);
4427                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4428                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4429                         current_link_up = 1;
4430         }
4431
4432         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4433         if (current_link_up == 1) {
4434                 if (tp->link_config.active_speed == SPEED_100 ||
4435                     tp->link_config.active_speed == SPEED_10)
4436                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4437                 else
4438                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4439         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4440                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4441         else
4442                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4443
4444         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4445         if (tp->link_config.active_duplex == DUPLEX_HALF)
4446                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4447
4448         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4449                 if (current_link_up == 1 &&
4450                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4451                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4452                 else
4453                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4454         }
4455
4456         /* ??? Without this setting the Netgear GA302T PHY does not
4457          * ??? send or receive packets...
4458          */
4459         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4460             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4461                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4462                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4463                 udelay(80);
4464         }
4465
4466         tw32_f(MAC_MODE, tp->mac_mode);
4467         udelay(40);
4468
4469         tg3_phy_eee_adjust(tp, current_link_up);
4470
4471         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4472                 /* Polled via timer. */
4473                 tw32_f(MAC_EVENT, 0);
4474         } else {
4475                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4476         }
4477         udelay(40);
4478
4479         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4480             current_link_up == 1 &&
4481             tp->link_config.active_speed == SPEED_1000 &&
4482             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4483                 udelay(120);
4484                 tw32_f(MAC_STATUS,
4485                      (MAC_STATUS_SYNC_CHANGED |
4486                       MAC_STATUS_CFG_CHANGED));
4487                 udelay(40);
4488                 tg3_write_mem(tp,
4489                               NIC_SRAM_FIRMWARE_MBOX,
4490                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4491         }
4492
4493         /* Prevent send BD corruption: disable CLKREQ at 10/100 Mbps. */
4494         if (tg3_flag(tp, CLKREQ_BUG)) {
4495                 if (tp->link_config.active_speed == SPEED_100 ||
4496                     tp->link_config.active_speed == SPEED_10)
4497                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4498                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4499                 else
4500                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4501                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4502         }
4503
4504         tg3_test_and_report_link_chg(tp, current_link_up);
4505
4506         return 0;
4507 }
4508
4509 struct tg3_fiber_aneginfo {
4510         int state;
4511 #define ANEG_STATE_UNKNOWN              0
4512 #define ANEG_STATE_AN_ENABLE            1
4513 #define ANEG_STATE_RESTART_INIT         2
4514 #define ANEG_STATE_RESTART              3
4515 #define ANEG_STATE_DISABLE_LINK_OK      4
4516 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4517 #define ANEG_STATE_ABILITY_DETECT       6
4518 #define ANEG_STATE_ACK_DETECT_INIT      7
4519 #define ANEG_STATE_ACK_DETECT           8
4520 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4521 #define ANEG_STATE_COMPLETE_ACK         10
4522 #define ANEG_STATE_IDLE_DETECT_INIT     11
4523 #define ANEG_STATE_IDLE_DETECT          12
4524 #define ANEG_STATE_LINK_OK              13
4525 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4526 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4527
4528         u32 flags;
4529 #define MR_AN_ENABLE            0x00000001
4530 #define MR_RESTART_AN           0x00000002
4531 #define MR_AN_COMPLETE          0x00000004
4532 #define MR_PAGE_RX              0x00000008
4533 #define MR_NP_LOADED            0x00000010
4534 #define MR_TOGGLE_TX            0x00000020
4535 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4536 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4537 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4538 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4539 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4540 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4541 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4542 #define MR_TOGGLE_RX            0x00002000
4543 #define MR_NP_RX                0x00004000
4544
4545 #define MR_LINK_OK              0x80000000
4546
4547         unsigned long link_time, cur_time;
4548
4549         u32 ability_match_cfg;
4550         int ability_match_count;
4551
4552         char ability_match, idle_match, ack_match;
4553
4554         u32 txconfig, rxconfig;
4555 #define ANEG_CFG_NP             0x00000080
4556 #define ANEG_CFG_ACK            0x00000040
4557 #define ANEG_CFG_RF2            0x00000020
4558 #define ANEG_CFG_RF1            0x00000010
4559 #define ANEG_CFG_PS2            0x00000001
4560 #define ANEG_CFG_PS1            0x00008000
4561 #define ANEG_CFG_HD             0x00004000
4562 #define ANEG_CFG_FD             0x00002000
4563 #define ANEG_CFG_INVAL          0x00001f06
4564
4565 };
4566 #define ANEG_OK         0
4567 #define ANEG_DONE       1
4568 #define ANEG_TIMER_ENAB 2
4569 #define ANEG_FAILED     -1
4570
4571 #define ANEG_STATE_SETTLE_TIME  10000
4572
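/* tg3_fiber_aneg_smachine() implements 1000BASE-X autonegotiation
 * (IEEE 802.3 clause 37) in software for parts without working
 * hardware autoneg.  fiber_autoneg() below ticks it from a udelay(1)
 * loop, so ANEG_STATE_SETTLE_TIME of 10000 ticks is roughly a 10 ms
 * settle window.
 */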
4573 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4574                                    struct tg3_fiber_aneginfo *ap)
4575 {
4576         u16 flowctrl;
4577         unsigned long delta;
4578         u32 rx_cfg_reg;
4579         int ret;
4580
4581         if (ap->state == ANEG_STATE_UNKNOWN) {
4582                 ap->rxconfig = 0;
4583                 ap->link_time = 0;
4584                 ap->cur_time = 0;
4585                 ap->ability_match_cfg = 0;
4586                 ap->ability_match_count = 0;
4587                 ap->ability_match = 0;
4588                 ap->idle_match = 0;
4589                 ap->ack_match = 0;
4590         }
4591         ap->cur_time++;
4592
4593         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4594                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4595
4596                 if (rx_cfg_reg != ap->ability_match_cfg) {
4597                         ap->ability_match_cfg = rx_cfg_reg;
4598                         ap->ability_match = 0;
4599                         ap->ability_match_count = 0;
4600                 } else {
4601                         if (++ap->ability_match_count > 1) {
4602                                 ap->ability_match = 1;
4603                                 ap->ability_match_cfg = rx_cfg_reg;
4604                         }
4605                 }
4606                 if (rx_cfg_reg & ANEG_CFG_ACK)
4607                         ap->ack_match = 1;
4608                 else
4609                         ap->ack_match = 0;
4610
4611                 ap->idle_match = 0;
4612         } else {
4613                 ap->idle_match = 1;
4614                 ap->ability_match_cfg = 0;
4615                 ap->ability_match_count = 0;
4616                 ap->ability_match = 0;
4617                 ap->ack_match = 0;
4618
4619                 rx_cfg_reg = 0;
4620         }
4621
4622         ap->rxconfig = rx_cfg_reg;
4623         ret = ANEG_OK;
4624
4625         switch (ap->state) {
4626         case ANEG_STATE_UNKNOWN:
4627                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4628                         ap->state = ANEG_STATE_AN_ENABLE;
4629
4630                 /* fallthru */
4631         case ANEG_STATE_AN_ENABLE:
4632                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4633                 if (ap->flags & MR_AN_ENABLE) {
4634                         ap->link_time = 0;
4635                         ap->cur_time = 0;
4636                         ap->ability_match_cfg = 0;
4637                         ap->ability_match_count = 0;
4638                         ap->ability_match = 0;
4639                         ap->idle_match = 0;
4640                         ap->ack_match = 0;
4641
4642                         ap->state = ANEG_STATE_RESTART_INIT;
4643                 } else {
4644                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4645                 }
4646                 break;
4647
4648         case ANEG_STATE_RESTART_INIT:
4649                 ap->link_time = ap->cur_time;
4650                 ap->flags &= ~(MR_NP_LOADED);
4651                 ap->txconfig = 0;
4652                 tw32(MAC_TX_AUTO_NEG, 0);
4653                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4654                 tw32_f(MAC_MODE, tp->mac_mode);
4655                 udelay(40);
4656
4657                 ret = ANEG_TIMER_ENAB;
4658                 ap->state = ANEG_STATE_RESTART;
4659
4660                 /* fallthru */
4661         case ANEG_STATE_RESTART:
4662                 delta = ap->cur_time - ap->link_time;
4663                 if (delta > ANEG_STATE_SETTLE_TIME)
4664                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4665                 else
4666                         ret = ANEG_TIMER_ENAB;
4667                 break;
4668
4669         case ANEG_STATE_DISABLE_LINK_OK:
4670                 ret = ANEG_DONE;
4671                 break;
4672
4673         case ANEG_STATE_ABILITY_DETECT_INIT:
4674                 ap->flags &= ~(MR_TOGGLE_TX);
4675                 ap->txconfig = ANEG_CFG_FD;
4676                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4677                 if (flowctrl & ADVERTISE_1000XPAUSE)
4678                         ap->txconfig |= ANEG_CFG_PS1;
4679                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4680                         ap->txconfig |= ANEG_CFG_PS2;
4681                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4682                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4683                 tw32_f(MAC_MODE, tp->mac_mode);
4684                 udelay(40);
4685
4686                 ap->state = ANEG_STATE_ABILITY_DETECT;
4687                 break;
4688
4689         case ANEG_STATE_ABILITY_DETECT:
4690                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4691                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4692                 break;
4693
4694         case ANEG_STATE_ACK_DETECT_INIT:
4695                 ap->txconfig |= ANEG_CFG_ACK;
4696                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4697                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4698                 tw32_f(MAC_MODE, tp->mac_mode);
4699                 udelay(40);
4700
4701                 ap->state = ANEG_STATE_ACK_DETECT;
4702
4703                 /* fallthru */
4704         case ANEG_STATE_ACK_DETECT:
4705                 if (ap->ack_match != 0) {
4706                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4707                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4708                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4709                         } else {
4710                                 ap->state = ANEG_STATE_AN_ENABLE;
4711                         }
4712                 } else if (ap->ability_match != 0 &&
4713                            ap->rxconfig == 0) {
4714                         ap->state = ANEG_STATE_AN_ENABLE;
4715                 }
4716                 break;
4717
4718         case ANEG_STATE_COMPLETE_ACK_INIT:
4719                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4720                         ret = ANEG_FAILED;
4721                         break;
4722                 }
4723                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4724                                MR_LP_ADV_HALF_DUPLEX |
4725                                MR_LP_ADV_SYM_PAUSE |
4726                                MR_LP_ADV_ASYM_PAUSE |
4727                                MR_LP_ADV_REMOTE_FAULT1 |
4728                                MR_LP_ADV_REMOTE_FAULT2 |
4729                                MR_LP_ADV_NEXT_PAGE |
4730                                MR_TOGGLE_RX |
4731                                MR_NP_RX);
4732                 if (ap->rxconfig & ANEG_CFG_FD)
4733                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4734                 if (ap->rxconfig & ANEG_CFG_HD)
4735                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4736                 if (ap->rxconfig & ANEG_CFG_PS1)
4737                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4738                 if (ap->rxconfig & ANEG_CFG_PS2)
4739                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4740                 if (ap->rxconfig & ANEG_CFG_RF1)
4741                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4742                 if (ap->rxconfig & ANEG_CFG_RF2)
4743                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4744                 if (ap->rxconfig & ANEG_CFG_NP)
4745                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4746
4747                 ap->link_time = ap->cur_time;
4748
4749                 ap->flags ^= (MR_TOGGLE_TX);
4750                 if (ap->rxconfig & 0x0008)      /* rx toggle bit */
4751                         ap->flags |= MR_TOGGLE_RX;
4752                 if (ap->rxconfig & ANEG_CFG_NP)
4753                         ap->flags |= MR_NP_RX;
4754                 ap->flags |= MR_PAGE_RX;
4755
4756                 ap->state = ANEG_STATE_COMPLETE_ACK;
4757                 ret = ANEG_TIMER_ENAB;
4758                 break;
4759
4760         case ANEG_STATE_COMPLETE_ACK:
4761                 if (ap->ability_match != 0 &&
4762                     ap->rxconfig == 0) {
4763                         ap->state = ANEG_STATE_AN_ENABLE;
4764                         break;
4765                 }
4766                 delta = ap->cur_time - ap->link_time;
4767                 if (delta > ANEG_STATE_SETTLE_TIME) {
4768                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4769                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4770                         } else {
4771                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4772                                     !(ap->flags & MR_NP_RX)) {
4773                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4774                                 } else {
4775                                         ret = ANEG_FAILED;
4776                                 }
4777                         }
4778                 }
4779                 break;
4780
4781         case ANEG_STATE_IDLE_DETECT_INIT:
4782                 ap->link_time = ap->cur_time;
4783                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4784                 tw32_f(MAC_MODE, tp->mac_mode);
4785                 udelay(40);
4786
4787                 ap->state = ANEG_STATE_IDLE_DETECT;
4788                 ret = ANEG_TIMER_ENAB;
4789                 break;
4790
4791         case ANEG_STATE_IDLE_DETECT:
4792                 if (ap->ability_match != 0 &&
4793                     ap->rxconfig == 0) {
4794                         ap->state = ANEG_STATE_AN_ENABLE;
4795                         break;
4796                 }
4797                 delta = ap->cur_time - ap->link_time;
4798                 if (delta > ANEG_STATE_SETTLE_TIME) {
4799                         /* XXX another gem from the Broadcom driver :( */
4800                         ap->state = ANEG_STATE_LINK_OK;
4801                 }
4802                 break;
4803
4804         case ANEG_STATE_LINK_OK:
4805                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4806                 ret = ANEG_DONE;
4807                 break;
4808
4809         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4810                 /* ??? unimplemented */
4811                 break;
4812
4813         case ANEG_STATE_NEXT_PAGE_WAIT:
4814                 /* ??? unimplemented */
4815                 break;
4816
4817         default:
4818                 ret = ANEG_FAILED;
4819                 break;
4820         }
4821
4822         return ret;
4823 }
4824
4825 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4826 {
4827         int res = 0;
4828         struct tg3_fiber_aneginfo aninfo;
4829         int status = ANEG_FAILED;
4830         unsigned int tick;
4831         u32 tmp;
4832
4833         tw32_f(MAC_TX_AUTO_NEG, 0);
4834
4835         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4836         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4837         udelay(40);
4838
4839         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4840         udelay(40);
4841
4842         memset(&aninfo, 0, sizeof(aninfo));
4843         aninfo.flags |= MR_AN_ENABLE;
4844         aninfo.state = ANEG_STATE_UNKNOWN;
4845         aninfo.cur_time = 0;
4846         tick = 0;
4847         while (++tick < 195000) {
4848                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4849                 if (status == ANEG_DONE || status == ANEG_FAILED)
4850                         break;
4851
4852                 udelay(1);
4853         }
4854
4855         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4856         tw32_f(MAC_MODE, tp->mac_mode);
4857         udelay(40);
4858
4859         *txflags = aninfo.txconfig;
4860         *rxflags = aninfo.flags;
4861
4862         if (status == ANEG_DONE &&
4863             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4864                              MR_LP_ADV_FULL_DUPLEX)))
4865                 res = 1;
4866
4867         return res;
4868 }
4869
4870 static void tg3_init_bcm8002(struct tg3 *tp)
4871 {
4872         u32 mac_status = tr32(MAC_STATUS);
4873         int i;
4874
4875         /* Reset when initializing for the first time or when we still have link. */
4876         if (tg3_flag(tp, INIT_COMPLETE) &&
4877             !(mac_status & MAC_STATUS_PCS_SYNCED))
4878                 return;
4879
4880         /* Set PLL lock range. */
4881         tg3_writephy(tp, 0x16, 0x8007);
4882
4883         /* SW reset */
4884         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4885
4886         /* Wait for reset to complete. */
4887         /* XXX schedule_timeout() ... */
4888         for (i = 0; i < 500; i++)
4889                 udelay(10);
4890
4891         /* Config mode; select PMA/Ch 1 regs. */
4892         tg3_writephy(tp, 0x10, 0x8411);
4893
4894         /* Enable auto-lock and comdet, select txclk for tx. */
4895         tg3_writephy(tp, 0x11, 0x0a10);
4896
4897         tg3_writephy(tp, 0x18, 0x00a0);
4898         tg3_writephy(tp, 0x16, 0x41ff);
4899
4900         /* Assert and deassert POR. */
4901         tg3_writephy(tp, 0x13, 0x0400);
4902         udelay(40);
4903         tg3_writephy(tp, 0x13, 0x0000);
4904
4905         tg3_writephy(tp, 0x11, 0x0a50);
4906         udelay(40);
4907         tg3_writephy(tp, 0x11, 0x0a10);
4908
4909         /* Wait for signal to stabilize */
4910         /* XXX schedule_timeout() ... */
4911         for (i = 0; i < 15000; i++)
4912                 udelay(10);
4913
4914         /* Deselect the channel register so we can read the PHYID
4915          * later.
4916          */
4917         tg3_writephy(tp, 0x10, 0x8011);
4918 }
4919
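/* Drive the SG_DIG hardware autoneg block for fiber.  Returns 1 if
 * the link is up when we are done, 0 otherwise.
 */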
4920 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4921 {
4922         u16 flowctrl;
4923         u32 sg_dig_ctrl, sg_dig_status;
4924         u32 serdes_cfg, expected_sg_dig_ctrl;
4925         int workaround, port_a;
4926         int current_link_up;
4927
4928         serdes_cfg = 0;
4929         expected_sg_dig_ctrl = 0;
4930         workaround = 0;
4931         port_a = 1;
4932         current_link_up = 0;
4933
4934         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4935             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4936                 workaround = 1;
4937                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4938                         port_a = 0;
4939
4940                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4941                 /* preserve bits 20-23 for voltage regulator */
4942                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4943         }
4944
4945         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4946
4947         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4948                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4949                         if (workaround) {
4950                                 u32 val = serdes_cfg;
4951
4952                                 if (port_a)
4953                                         val |= 0xc010000;
4954                                 else
4955                                         val |= 0x4010000;
4956                                 tw32_f(MAC_SERDES_CFG, val);
4957                         }
4958
4959                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4960                 }
4961                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4962                         tg3_setup_flow_control(tp, 0, 0);
4963                         current_link_up = 1;
4964                 }
4965                 goto out;
4966         }
4967
4968         /* Want auto-negotiation.  */
4969         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4970
4971         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4972         if (flowctrl & ADVERTISE_1000XPAUSE)
4973                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4974         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4975                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4976
4977         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4978                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4979                     tp->serdes_counter &&
4980                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4981                                     MAC_STATUS_RCVD_CFG)) ==
4982                      MAC_STATUS_PCS_SYNCED)) {
4983                         tp->serdes_counter--;
4984                         current_link_up = 1;
4985                         goto out;
4986                 }
4987 restart_autoneg:
4988                 if (workaround)
4989                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4990                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4991                 udelay(5);
4992                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4993
4994                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4995                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4996         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4997                                  MAC_STATUS_SIGNAL_DET)) {
4998                 sg_dig_status = tr32(SG_DIG_STATUS);
4999                 mac_status = tr32(MAC_STATUS);
5000
5001                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5002                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5003                         u32 local_adv = 0, remote_adv = 0;
5004
5005                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5006                                 local_adv |= ADVERTISE_1000XPAUSE;
5007                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5008                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5009
5010                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5011                                 remote_adv |= LPA_1000XPAUSE;
5012                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5013                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5014
5015                         tp->link_config.rmt_adv =
5016                                            mii_adv_to_ethtool_adv_x(remote_adv);
5017
5018                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5019                         current_link_up = 1;
5020                         tp->serdes_counter = 0;
5021                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5022                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5023                         if (tp->serdes_counter)
5024                                 tp->serdes_counter--;
5025                         else {
5026                                 if (workaround) {
5027                                         u32 val = serdes_cfg;
5028
5029                                         if (port_a)
5030                                                 val |= 0xc010000;
5031                                         else
5032                                                 val |= 0x4010000;
5033
5034                                         tw32_f(MAC_SERDES_CFG, val);
5035                                 }
5036
5037                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5038                                 udelay(40);
5039
5040                                 /* Parallel detection: link is up only if
5041                                  * we have PCS_SYNC and are not
5042                                  * receiving config code words. */
5043                                 mac_status = tr32(MAC_STATUS);
5044                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5045                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5046                                         tg3_setup_flow_control(tp, 0, 0);
5047                                         current_link_up = 1;
5048                                         tp->phy_flags |=
5049                                                 TG3_PHYFLG_PARALLEL_DETECT;
5050                                         tp->serdes_counter =
5051                                                 SERDES_PARALLEL_DET_TIMEOUT;
5052                                 } else
5053                                         goto restart_autoneg;
5054                         }
5055                 }
5056         } else {
5057                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5058                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5059         }
5060
5061 out:
5062         return current_link_up;
5063 }
5064
5065 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5066 {
5067         int current_link_up = 0;
5068
5069         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5070                 goto out;
5071
5072         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5073                 u32 txflags, rxflags;
5074                 int i;
5075
5076                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5077                         u32 local_adv = 0, remote_adv = 0;
5078
5079                         if (txflags & ANEG_CFG_PS1)
5080                                 local_adv |= ADVERTISE_1000XPAUSE;
5081                         if (txflags & ANEG_CFG_PS2)
5082                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5083
5084                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5085                                 remote_adv |= LPA_1000XPAUSE;
5086                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5087                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5088
5089                         tp->link_config.rmt_adv =
5090                                            mii_adv_to_ethtool_adv_x(remote_adv);
5091
5092                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5093
5094                         current_link_up = 1;
5095                 }
5096                 for (i = 0; i < 30; i++) {
5097                         udelay(20);
5098                         tw32_f(MAC_STATUS,
5099                                (MAC_STATUS_SYNC_CHANGED |
5100                                 MAC_STATUS_CFG_CHANGED));
5101                         udelay(40);
5102                         if ((tr32(MAC_STATUS) &
5103                              (MAC_STATUS_SYNC_CHANGED |
5104                               MAC_STATUS_CFG_CHANGED)) == 0)
5105                                 break;
5106                 }
5107
5108                 mac_status = tr32(MAC_STATUS);
5109                 if (current_link_up == 0 &&
5110                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5111                     !(mac_status & MAC_STATUS_RCVD_CFG))
5112                         current_link_up = 1;
5113         } else {
5114                 tg3_setup_flow_control(tp, 0, 0);
5115
5116                 /* Forcing 1000FD link up. */
5117                 current_link_up = 1;
5118
5119                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5120                 udelay(40);
5121
5122                 tw32_f(MAC_MODE, tp->mac_mode);
5123                 udelay(40);
5124         }
5125
5126 out:
5127         return current_link_up;
5128 }
5129
5130 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
5131 {
5132         u32 orig_pause_cfg;
5133         u16 orig_active_speed;
5134         u8 orig_active_duplex;
5135         u32 mac_status;
5136         int current_link_up;
5137         int i;
5138
5139         orig_pause_cfg = tp->link_config.active_flowctrl;
5140         orig_active_speed = tp->link_config.active_speed;
5141         orig_active_duplex = tp->link_config.active_duplex;
5142
5143         if (!tg3_flag(tp, HW_AUTONEG) &&
5144             tp->link_up &&
5145             tg3_flag(tp, INIT_COMPLETE)) {
5146                 mac_status = tr32(MAC_STATUS);
5147                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5148                                MAC_STATUS_SIGNAL_DET |
5149                                MAC_STATUS_CFG_CHANGED |
5150                                MAC_STATUS_RCVD_CFG);
5151                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5152                                    MAC_STATUS_SIGNAL_DET)) {
5153                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5154                                             MAC_STATUS_CFG_CHANGED));
5155                         return 0;
5156                 }
5157         }
5158
5159         tw32_f(MAC_TX_AUTO_NEG, 0);
5160
5161         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5162         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5163         tw32_f(MAC_MODE, tp->mac_mode);
5164         udelay(40);
5165
5166         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5167                 tg3_init_bcm8002(tp);
5168
5169         /* Enable link change events even when polling the serdes. */
5170         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5171         udelay(40);
5172
5173         current_link_up = 0;
5174         tp->link_config.rmt_adv = 0;
5175         mac_status = tr32(MAC_STATUS);
5176
5177         if (tg3_flag(tp, HW_AUTONEG))
5178                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5179         else
5180                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5181
5182         tp->napi[0].hw_status->status =
5183                 (SD_STATUS_UPDATED |
5184                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5185
5186         for (i = 0; i < 100; i++) {
5187                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5188                                     MAC_STATUS_CFG_CHANGED));
5189                 udelay(5);
5190                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5191                                          MAC_STATUS_CFG_CHANGED |
5192                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5193                         break;
5194         }
5195
5196         mac_status = tr32(MAC_STATUS);
5197         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5198                 current_link_up = 0;
5199                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5200                     tp->serdes_counter == 0) {
5201                         tw32_f(MAC_MODE, (tp->mac_mode |
5202                                           MAC_MODE_SEND_CONFIGS));
5203                         udelay(1);
5204                         tw32_f(MAC_MODE, tp->mac_mode);
5205                 }
5206         }
5207
5208         if (current_link_up == 1) {
5209                 tp->link_config.active_speed = SPEED_1000;
5210                 tp->link_config.active_duplex = DUPLEX_FULL;
5211                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5212                                     LED_CTRL_LNKLED_OVERRIDE |
5213                                     LED_CTRL_1000MBPS_ON));
5214         } else {
5215                 tp->link_config.active_speed = SPEED_UNKNOWN;
5216                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5217                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5218                                     LED_CTRL_LNKLED_OVERRIDE |
5219                                     LED_CTRL_TRAFFIC_OVERRIDE));
5220         }
5221
5222         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5223                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5224                 if (orig_pause_cfg != now_pause_cfg ||
5225                     orig_active_speed != tp->link_config.active_speed ||
5226                     orig_active_duplex != tp->link_config.active_duplex)
5227                         tg3_link_report(tp);
5228         }
5229
5230         return 0;
5231 }
5232
5233 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5234 {
5235         int current_link_up, err = 0;
5236         u32 bmsr, bmcr;
5237         u16 current_speed;
5238         u8 current_duplex;
5239         u32 local_adv, remote_adv;
5240
5241         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5242         tw32_f(MAC_MODE, tp->mac_mode);
5243         udelay(40);
5244
5245         tw32(MAC_EVENT, 0);
5246
5247         tw32_f(MAC_STATUS,
5248              (MAC_STATUS_SYNC_CHANGED |
5249               MAC_STATUS_CFG_CHANGED |
5250               MAC_STATUS_MI_COMPLETION |
5251               MAC_STATUS_LNKSTATE_CHANGED));
5252         udelay(40);
5253
5254         if (force_reset)
5255                 tg3_phy_reset(tp);
5256
5257         current_link_up = 0;
5258         current_speed = SPEED_UNKNOWN;
5259         current_duplex = DUPLEX_UNKNOWN;
5260         tp->link_config.rmt_adv = 0;
5261
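        /* BMSR latches link-down; read it twice for the current state. */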
5262         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5263         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5264         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5265                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5266                         bmsr |= BMSR_LSTATUS;
5267                 else
5268                         bmsr &= ~BMSR_LSTATUS;
5269         }
5270
5271         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5272
5273         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5274             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5275                 /* do nothing, just check for link up at the end */
5276         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5277                 u32 adv, newadv;
5278
5279                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5280                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5281                                  ADVERTISE_1000XPAUSE |
5282                                  ADVERTISE_1000XPSE_ASYM |
5283                                  ADVERTISE_SLCT);
5284
5285                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5286                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5287
5288                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5289                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5290                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5291                         tg3_writephy(tp, MII_BMCR, bmcr);
5292
5293                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5294                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5295                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5296
5297                         return err;
5298                 }
5299         } else {
5300                 u32 new_bmcr;
5301
5302                 bmcr &= ~BMCR_SPEED1000;
5303                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5304
5305                 if (tp->link_config.duplex == DUPLEX_FULL)
5306                         new_bmcr |= BMCR_FULLDPLX;
5307
5308                 if (new_bmcr != bmcr) {
5309                         /* BMCR_SPEED1000 is a reserved bit that needs
5310                          * to be set on write.
5311                          */
5312                         new_bmcr |= BMCR_SPEED1000;
5313
5314                         /* Force a linkdown */
5315                         if (tp->link_up) {
5316                                 u32 adv;
5317
5318                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5319                                 adv &= ~(ADVERTISE_1000XFULL |
5320                                          ADVERTISE_1000XHALF |
5321                                          ADVERTISE_SLCT);
5322                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5323                                 tg3_writephy(tp, MII_BMCR, bmcr |
5324                                                            BMCR_ANRESTART |
5325                                                            BMCR_ANENABLE);
5326                                 udelay(10);
5327                                 tg3_carrier_off(tp);
5328                         }
5329                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5330                         bmcr = new_bmcr;
5331                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5332                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5333                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5334                             ASIC_REV_5714) {
5335                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5336                                         bmsr |= BMSR_LSTATUS;
5337                                 else
5338                                         bmsr &= ~BMSR_LSTATUS;
5339                         }
5340                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5341                 }
5342         }
5343
5344         if (bmsr & BMSR_LSTATUS) {
5345                 current_speed = SPEED_1000;
5346                 current_link_up = 1;
5347                 if (bmcr & BMCR_FULLDPLX)
5348                         current_duplex = DUPLEX_FULL;
5349                 else
5350                         current_duplex = DUPLEX_HALF;
5351
5352                 local_adv = 0;
5353                 remote_adv = 0;
5354
5355                 if (bmcr & BMCR_ANENABLE) {
5356                         u32 common;
5357
5358                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5359                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5360                         common = local_adv & remote_adv;
5361                         if (common & (ADVERTISE_1000XHALF |
5362                                       ADVERTISE_1000XFULL)) {
5363                                 if (common & ADVERTISE_1000XFULL)
5364                                         current_duplex = DUPLEX_FULL;
5365                                 else
5366                                         current_duplex = DUPLEX_HALF;
5367
5368                                 tp->link_config.rmt_adv =
5369                                            mii_adv_to_ethtool_adv_x(remote_adv);
5370                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5371                                 /* Link is up via parallel detect */
5372                         } else {
5373                                 current_link_up = 0;
5374                         }
5375                 }
5376         }
5377
5378         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5379                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5380
5381         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5382         if (tp->link_config.active_duplex == DUPLEX_HALF)
5383                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5384
5385         tw32_f(MAC_MODE, tp->mac_mode);
5386         udelay(40);
5387
5388         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5389
5390         tp->link_config.active_speed = current_speed;
5391         tp->link_config.active_duplex = current_duplex;
5392
5393         tg3_test_and_report_link_chg(tp, current_link_up);
5394         return err;
5395 }
5396
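/* Called periodically while autonegotiating on a serdes link.  Once
 * autoneg has had time to settle, fall back to a forced 1000FD link
 * (parallel detection) when the partner is not sending config code
 * words, and turn autoneg back on when it is.
 */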
5397 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5398 {
5399         if (tp->serdes_counter) {
5400                 /* Give autoneg time to complete. */
5401                 tp->serdes_counter--;
5402                 return;
5403         }
5404
5405         if (!tp->link_up &&
5406             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5407                 u32 bmcr;
5408
5409                 tg3_readphy(tp, MII_BMCR, &bmcr);
5410                 if (bmcr & BMCR_ANENABLE) {
5411                         u32 phy1, phy2;
5412
5413                         /* Select shadow register 0x1f */
5414                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5415                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5416
5417                         /* Select expansion interrupt status register */
5418                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5419                                          MII_TG3_DSP_EXP1_INT_STAT);
5420                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5421                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5422
5423                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5424                                 /* We have signal detect and not receiving
5425                                  * config code words, link is up by parallel
5426                                  * detection.
5427                                  */
5428
5429                                 bmcr &= ~BMCR_ANENABLE;
5430                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5431                                 tg3_writephy(tp, MII_BMCR, bmcr);
5432                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5433                         }
5434                 }
5435         } else if (tp->link_up &&
5436                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5437                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5438                 u32 phy2;
5439
5440                 /* Select expansion interrupt status register */
5441                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5442                                  MII_TG3_DSP_EXP1_INT_STAT);
5443                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5444                 if (phy2 & 0x20) {
5445                         u32 bmcr;
5446
5447                         /* Config code words received, turn on autoneg. */
5448                         tg3_readphy(tp, MII_BMCR, &bmcr);
5449                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5450
5451                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5452
5453                 }
5454         }
5455 }
5456
5457 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5458 {
5459         u32 val;
5460         int err;
5461
5462         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5463                 err = tg3_setup_fiber_phy(tp, force_reset);
5464         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5465                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5466         else
5467                 err = tg3_setup_copper_phy(tp, force_reset);
5468
5469         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5470                 u32 scale;
5471
5472                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5473                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5474                         scale = 65;
5475                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5476                         scale = 6;
5477                 else
5478                         scale = 12;
5479
5480                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5481                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5482                 tw32(GRC_MISC_CFG, val);
5483         }
5484
5485         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5486               (6 << TX_LENGTHS_IPG_SHIFT);
5487         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5488                 val |= tr32(MAC_TX_LENGTHS) &
5489                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5490                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5491
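        /* Gigabit half-duplex uses the extended 802.3 slot time
         * (carrier extension), hence the larger value below.
         */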
5492         if (tp->link_config.active_speed == SPEED_1000 &&
5493             tp->link_config.active_duplex == DUPLEX_HALF)
5494                 tw32(MAC_TX_LENGTHS, val |
5495                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5496         else
5497                 tw32(MAC_TX_LENGTHS, val |
5498                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5499
5500         if (!tg3_flag(tp, 5705_PLUS)) {
5501                 if (tp->link_up) {
5502                         tw32(HOSTCC_STAT_COAL_TICKS,
5503                              tp->coal.stats_block_coalesce_usecs);
5504                 } else {
5505                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5506                 }
5507         }
5508
5509         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5510                 val = tr32(PCIE_PWR_MGMT_THRESH);
5511                 if (!tp->link_up)
5512                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5513                               tp->pwrmgmt_thresh;
5514                 else
5515                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5516                 tw32(PCIE_PWR_MGMT_THRESH, val);
5517         }
5518
5519         return err;
5520 }
5521
5522 /* tp->lock must be held */
5523 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
5524 {
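        /* Stop the clock while loading the new 64-bit value so the two
         * 32-bit halves take effect together, then restart it.
         */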
5525         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
5526         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
5527         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
5528         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
5529 }
5530
5531 /* tp->lock must be held */
5532 static void tg3_ptp_init(struct tg3 *tp)
5533 {
5534         if (!tg3_flag(tp, PTP_CAPABLE))
5535                 return;
5536
5537         /* Initialize the hardware clock to the system time. */
5538         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
5539         tp->ptp_adjust = 0;
5540 }
5541
5542 /* tp->lock must be held */
5543 static void tg3_ptp_resume(struct tg3 *tp)
5544 {
5545         if (!tg3_flag(tp, PTP_CAPABLE))
5546                 return;
5547
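        /* Reload the clock from system time plus whatever offset is
         * recorded in ptp_adjust, which then starts over at zero.
         */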
5548         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
5549         tp->ptp_adjust = 0;
5550 }
5551
5552 static void tg3_ptp_fini(struct tg3 *tp)
5553 {
5554         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
5555                 return;
5556
5557         tp->ptp_clock = NULL;
5558         tp->ptp_adjust = 0;
5559 }
5560
5561 static inline int tg3_irq_sync(struct tg3 *tp)
5562 {
5563         return tp->irq_sync;
5564 }
5565
5566 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5567 {
5568         int i;
5569
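        /* Bias dst by the register offset so each value lands at the
         * same offset within the dump buffer as the register it was
         * read from.
         */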
5570         dst = (u32 *)((u8 *)dst + off);
5571         for (i = 0; i < len; i += sizeof(u32))
5572                 *dst++ = tr32(off + i);
5573 }
5574
5575 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5576 {
5577         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5578         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5579         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5580         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5581         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5582         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5583         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5584         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5585         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5586         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5587         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5588         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5589         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5590         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5591         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5592         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5593         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5594         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5595         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5596
5597         if (tg3_flag(tp, SUPPORT_MSIX))
5598                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5599
5600         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5601         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5602         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5603         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5604         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5605         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5606         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5607         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5608
5609         if (!tg3_flag(tp, 5705_PLUS)) {
5610                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5611                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5612                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5613         }
5614
5615         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5616         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5617         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5618         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5619         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5620
5621         if (tg3_flag(tp, NVRAM))
5622                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5623 }
5624
5625 static void tg3_dump_state(struct tg3 *tp)
5626 {
5627         int i;
5628         u32 *regs;
5629
5630         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5631         if (!regs) {
5632                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5633                 return;
5634         }
5635
5636         if (tg3_flag(tp, PCI_EXPRESS)) {
5637                 /* Read up to but not including private PCI registers */
5638                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5639                         regs[i / sizeof(u32)] = tr32(i);
5640         } else
5641                 tg3_dump_legacy_regs(tp, regs);
5642
5643         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5644                 if (!regs[i + 0] && !regs[i + 1] &&
5645                     !regs[i + 2] && !regs[i + 3])
5646                         continue;
5647
5648                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5649                            i * 4,
5650                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5651         }
5652
5653         kfree(regs);
5654
5655         for (i = 0; i < tp->irq_cnt; i++) {
5656                 struct tg3_napi *tnapi = &tp->napi[i];
5657
5658                 /* SW status block */
5659                 netdev_err(tp->dev,
5660                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5661                            i,
5662                            tnapi->hw_status->status,
5663                            tnapi->hw_status->status_tag,
5664                            tnapi->hw_status->rx_jumbo_consumer,
5665                            tnapi->hw_status->rx_consumer,
5666                            tnapi->hw_status->rx_mini_consumer,
5667                            tnapi->hw_status->idx[0].rx_producer,
5668                            tnapi->hw_status->idx[0].tx_consumer);
5669
5670                 netdev_err(tp->dev,
5671                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5672                            i,
5673                            tnapi->last_tag, tnapi->last_irq_tag,
5674                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5675                            tnapi->rx_rcb_ptr,
5676                            tnapi->prodring.rx_std_prod_idx,
5677                            tnapi->prodring.rx_std_cons_idx,
5678                            tnapi->prodring.rx_jmb_prod_idx,
5679                            tnapi->prodring.rx_jmb_cons_idx);
5680         }
5681 }
5682
5683 /* This is called whenever we suspect that the system chipset is
5684  * reordering the sequence of MMIO to the tx send mailbox. The symptom
5685  * is bogus tx completions. We try to recover by setting the
5686  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5687  * in the workqueue.
5688  */
5689 static void tg3_tx_recover(struct tg3 *tp)
5690 {
5691         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5692                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5693
5694         netdev_warn(tp->dev,
5695                     "The system may be re-ordering memory-mapped I/O "
5696                     "cycles to the network device, attempting to recover. "
5697                     "Please report the problem to the driver maintainer "
5698                     "and include system chipset information.\n");
5699
5700         spin_lock(&tp->lock);
5701         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5702         spin_unlock(&tp->lock);
5703 }
5704
5705 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5706 {
5707         /* Tell compiler to fetch tx indices from memory. */
5708         barrier();
5709         return tnapi->tx_pending -
5710                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5711 }
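/* Worked example of the ring arithmetic above, assuming the usual
 * TG3_TX_RING_SIZE of 512 (a power of two, so the mask acts as a
 * modulo): with tx_prod = 5 and tx_cons = 510 the producer has
 * wrapped, (5 - 510) & 511 = 7 descriptors are in flight, and
 * tg3_tx_avail() returns tx_pending - 7.
 */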
5712
5713 /* Tigon3 never reports partial packet sends.  So we do not
5714  * need special logic to handle SKBs that have not had all
5715  * of their frags sent yet, like SunGEM does.
5716  */
5717 static void tg3_tx(struct tg3_napi *tnapi)
5718 {
5719         struct tg3 *tp = tnapi->tp;
5720         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5721         u32 sw_idx = tnapi->tx_cons;
5722         struct netdev_queue *txq;
5723         int index = tnapi - tp->napi;
5724         unsigned int pkts_compl = 0, bytes_compl = 0;
5725
5726         if (tg3_flag(tp, ENABLE_TSS))
5727                 index--;
5728
5729         txq = netdev_get_tx_queue(tp->dev, index);
5730
5731         while (sw_idx != hw_idx) {
5732                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5733                 struct sk_buff *skb = ri->skb;
5734                 int i, tx_bug = 0;
5735
5736                 if (unlikely(skb == NULL)) {
5737                         tg3_tx_recover(tp);
5738                         return;
5739                 }
5740
5741                 pci_unmap_single(tp->pdev,
5742                                  dma_unmap_addr(ri, mapping),
5743                                  skb_headlen(skb),
5744                                  PCI_DMA_TODEVICE);
5745
5746                 ri->skb = NULL;
5747
5748                 while (ri->fragmented) {
5749                         ri->fragmented = false;
5750                         sw_idx = NEXT_TX(sw_idx);
5751                         ri = &tnapi->tx_buffers[sw_idx];
5752                 }
5753
5754                 sw_idx = NEXT_TX(sw_idx);
5755
5756                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5757                         ri = &tnapi->tx_buffers[sw_idx];
5758                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5759                                 tx_bug = 1;
5760
5761                         pci_unmap_page(tp->pdev,
5762                                        dma_unmap_addr(ri, mapping),
5763                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5764                                        PCI_DMA_TODEVICE);
5765
5766                         while (ri->fragmented) {
5767                                 ri->fragmented = false;
5768                                 sw_idx = NEXT_TX(sw_idx);
5769                                 ri = &tnapi->tx_buffers[sw_idx];
5770                         }
5771
5772                         sw_idx = NEXT_TX(sw_idx);
5773                 }
5774
5775                 pkts_compl++;
5776                 bytes_compl += skb->len;
5777
5778                 dev_kfree_skb(skb);
5779
5780                 if (unlikely(tx_bug)) {
5781                         tg3_tx_recover(tp);
5782                         return;
5783                 }
5784         }
5785
5786         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5787
5788         tnapi->tx_cons = sw_idx;
5789
5790         /* Need to make the tx_cons update visible to tg3_start_xmit()
5791          * before checking for netif_queue_stopped().  Without the
5792          * memory barrier, there is a small possibility that tg3_start_xmit()
5793          * will miss it and cause the queue to be stopped forever.
5794          */
5795         smp_mb();
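        /* Sketch of the lost-wakeup the barrier prevents: without it,
         * the tx_cons store above could become visible after the
         * netif_tx_queue_stopped() read below; tg3_start_xmit() could
         * then stop the queue just after reading the stale tx_cons,
         * this path would still see the queue running, and nobody
         * would ever wake it.
         */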
5796
5797         if (unlikely(netif_tx_queue_stopped(txq) &&
5798                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5799                 __netif_tx_lock(txq, smp_processor_id());
5800                 if (netif_tx_queue_stopped(txq) &&
5801                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5802                         netif_tx_wake_queue(txq);
5803                 __netif_tx_unlock(txq);
5804         }
5805 }
5806
5807 static void tg3_frag_free(bool is_frag, void *data)
5808 {
5809         if (is_frag)
5810                 put_page(virt_to_head_page(data));
5811         else
5812                 kfree(data);
5813 }
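/* This mirrors the allocation strategy in tg3_alloc_rx_data() below:
 * buffers that fit in a page come from the page-fragment allocator
 * (netdev_alloc_frag) and are released via the page refcount, while
 * larger buffers fall back to kmalloc()/kfree().
 */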
5814
5815 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5816 {
5817         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5818                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5819
5820         if (!ri->data)
5821                 return;
5822
5823         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5824                          map_sz, PCI_DMA_FROMDEVICE);
5825         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
5826         ri->data = NULL;
5827 }
5828
5829
5830 /* Returns size of skb allocated or < 0 on error.
5831  *
5832  * We only need to fill in the address because the other members
5833  * of the RX descriptor are invariant, see tg3_init_rings.
5834  *
5835  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5836  * posting buffers we only dirty the first cache line of the RX
5837  * descriptor (containing the address).  Whereas for the RX status
5838  * buffers the cpu only reads the last cacheline of the RX descriptor
5839  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5840  */
5841 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5842                              u32 opaque_key, u32 dest_idx_unmasked,
5843                              unsigned int *frag_size)
5844 {
5845         struct tg3_rx_buffer_desc *desc;
5846         struct ring_info *map;
5847         u8 *data;
5848         dma_addr_t mapping;
5849         int skb_size, data_size, dest_idx;
5850
5851         switch (opaque_key) {
5852         case RXD_OPAQUE_RING_STD:
5853                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5854                 desc = &tpr->rx_std[dest_idx];
5855                 map = &tpr->rx_std_buffers[dest_idx];
5856                 data_size = tp->rx_pkt_map_sz;
5857                 break;
5858
5859         case RXD_OPAQUE_RING_JUMBO:
5860                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5861                 desc = &tpr->rx_jmb[dest_idx].std;
5862                 map = &tpr->rx_jmb_buffers[dest_idx];
5863                 data_size = TG3_RX_JMB_MAP_SZ;
5864                 break;
5865
5866         default:
5867                 return -EINVAL;
5868         }
5869
5870         /* Do not overwrite any of the map or rp information
5871          * until we are sure we can commit to a new buffer.
5872          *
5873          * Callers depend upon this behavior and assume that
5874          * we leave everything unchanged if we fail.
5875          */
5876         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5877                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5878         if (skb_size <= PAGE_SIZE) {
5879                 data = netdev_alloc_frag(skb_size);
5880                 *frag_size = skb_size;
5881         } else {
5882                 data = kmalloc(skb_size, GFP_ATOMIC);
5883                 *frag_size = 0;
5884         }
5885         if (!data)
5886                 return -ENOMEM;
5887
5888         mapping = pci_map_single(tp->pdev,
5889                                  data + TG3_RX_OFFSET(tp),
5890                                  data_size,
5891                                  PCI_DMA_FROMDEVICE);
5892         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5893                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
5894                 return -EIO;
5895         }
5896
5897         map->data = data;
5898         dma_unmap_addr_set(map, mapping, mapping);
5899
5900         desc->addr_hi = ((u64)mapping >> 32);
5901         desc->addr_lo = ((u64)mapping & 0xffffffff);
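        /* Example split, assuming a 64-bit dma_addr_t: a mapping of
         * 0x0000000123456780 yields addr_hi = 0x00000001 and
         * addr_lo = 0x23456780; the chip reassembles the full bus
         * address from the two 32-bit descriptor words.
         */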
5902
5903         return data_size;
5904 }
5905
5906 /* We only need to move over in the address because the other
5907  * members of the RX descriptor are invariant.  See notes above
5908  * tg3_alloc_rx_data for full details.
5909  */
5910 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5911                            struct tg3_rx_prodring_set *dpr,
5912                            u32 opaque_key, int src_idx,
5913                            u32 dest_idx_unmasked)
5914 {
5915         struct tg3 *tp = tnapi->tp;
5916         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5917         struct ring_info *src_map, *dest_map;
5918         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5919         int dest_idx;
5920
5921         switch (opaque_key) {
5922         case RXD_OPAQUE_RING_STD:
5923                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5924                 dest_desc = &dpr->rx_std[dest_idx];
5925                 dest_map = &dpr->rx_std_buffers[dest_idx];
5926                 src_desc = &spr->rx_std[src_idx];
5927                 src_map = &spr->rx_std_buffers[src_idx];
5928                 break;
5929
5930         case RXD_OPAQUE_RING_JUMBO:
5931                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5932                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5933                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5934                 src_desc = &spr->rx_jmb[src_idx].std;
5935                 src_map = &spr->rx_jmb_buffers[src_idx];
5936                 break;
5937
5938         default:
5939                 return;
5940         }
5941
5942         dest_map->data = src_map->data;
5943         dma_unmap_addr_set(dest_map, mapping,
5944                            dma_unmap_addr(src_map, mapping));
5945         dest_desc->addr_hi = src_desc->addr_hi;
5946         dest_desc->addr_lo = src_desc->addr_lo;
5947
5948         /* Ensure that the update to the skb happens after the physical
5949          * addresses have been transferred to the new BD location.
5950          */
5951         smp_wmb();
5952
5953         src_map->data = NULL;
5954 }
5955
5956 /* The RX ring scheme is composed of multiple rings which post fresh
5957  * buffers to the chip, and one special ring the chip uses to report
5958  * status back to the host.
5959  *
5960  * The special ring reports the status of received packets to the
5961  * host.  The chip does not write into the original descriptor the
5962  * RX buffer was obtained from.  The chip simply takes the original
5963  * descriptor as provided by the host, updates the status and length
5964  * field, then writes this into the next status ring entry.
5965  *
5966  * Each ring the host uses to post buffers to the chip is described
5967  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5968  * it is first placed into the on-chip ram.  When the packet's length
5969  * is known, it walks down the TG3_BDINFO entries to select the ring.
5970  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5971  * whose MAXLEN covers the new packet's length is chosen.
5972  *
5973  * The "separate ring for rx status" scheme may sound queer, but it makes
5974  * sense from a cache coherency perspective.  If only the host writes
5975  * to the buffer post rings, and only the chip writes to the rx status
5976  * rings, then cache lines never move beyond shared-modified state.
5977  * If both the host and chip were to write into the same ring, cache line
5978  * eviction could occur since both entities want it in an exclusive state.
5979  */
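/* A rough sketch of the flow described above:
 *
 *   host --posts buffers--> std/jumbo producer rings ----> chip
 *   chip --writes status--> rx return ring --tg3_rx()--> host
 *
 * Only the host writes the producer rings and only the chip writes
 * the return ring, giving the cache-friendly split described above.
 */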
5980 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5981 {
5982         struct tg3 *tp = tnapi->tp;
5983         u32 work_mask, rx_std_posted = 0;
5984         u32 std_prod_idx, jmb_prod_idx;
5985         u32 sw_idx = tnapi->rx_rcb_ptr;
5986         u16 hw_idx;
5987         int received;
5988         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5989
5990         hw_idx = *(tnapi->rx_rcb_prod_idx);
5991         /*
5992          * We need to order the read of hw_idx and the read of
5993          * the opaque cookie.
5994          */
5995         rmb();
5996         work_mask = 0;
5997         received = 0;
5998         std_prod_idx = tpr->rx_std_prod_idx;
5999         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6000         while (sw_idx != hw_idx && budget > 0) {
6001                 struct ring_info *ri;
6002                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6003                 unsigned int len;
6004                 struct sk_buff *skb;
6005                 dma_addr_t dma_addr;
6006                 u32 opaque_key, desc_idx, *post_ptr;
6007                 u8 *data;
6008
6009                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6010                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6011                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6012                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6013                         dma_addr = dma_unmap_addr(ri, mapping);
6014                         data = ri->data;
6015                         post_ptr = &std_prod_idx;
6016                         rx_std_posted++;
6017                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6018                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6019                         dma_addr = dma_unmap_addr(ri, mapping);
6020                         data = ri->data;
6021                         post_ptr = &jmb_prod_idx;
6022                 } else
6023                         goto next_pkt_nopost;
6024
6025                 work_mask |= opaque_key;
6026
6027                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
6028                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
6029                 drop_it:
6030                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6031                                        desc_idx, *post_ptr);
6032                 drop_it_no_recycle:
6033                         /* Other statistics are tracked by the card. */
6034                         tp->rx_dropped++;
6035                         goto next_pkt;
6036                 }
6037
6038                 prefetch(data + TG3_RX_OFFSET(tp));
6039                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6040                       ETH_FCS_LEN;
6041
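                /* Two receive strategies: packets larger than the copy
                 * threshold keep their DMA buffer (a replacement is
                 * allocated and posted), while small packets are copied
                 * into a fresh skb so the larger DMA buffer can be
                 * recycled in place via tg3_recycle_rx().
                 */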
6042                 if (len > TG3_RX_COPY_THRESH(tp)) {
6043                         int skb_size;
6044                         unsigned int frag_size;
6045
6046                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6047                                                     *post_ptr, &frag_size);
6048                         if (skb_size < 0)
6049                                 goto drop_it;
6050
6051                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6052                                          PCI_DMA_FROMDEVICE);
6053
6054                         skb = build_skb(data, frag_size);
6055                         if (!skb) {
6056                                 tg3_frag_free(frag_size != 0, data);
6057                                 goto drop_it_no_recycle;
6058                         }
6059                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6060                         /* Ensure that the update to the data happens
6061                          * after the usage of the old DMA mapping.
6062                          */
6063                         smp_wmb();
6064
6065                         ri->data = NULL;
6066
6067                 } else {
6068                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6069                                        desc_idx, *post_ptr);
6070
6071                         skb = netdev_alloc_skb(tp->dev,
6072                                                len + TG3_RAW_IP_ALIGN);
6073                         if (skb == NULL)
6074                                 goto drop_it_no_recycle;
6075
6076                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6077                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6078                         memcpy(skb->data,
6079                                data + TG3_RX_OFFSET(tp),
6080                                len);
6081                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6082                 }
6083
6084                 skb_put(skb, len);
6085                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6086                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6087                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6088                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6089                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6090                 else
6091                         skb_checksum_none_assert(skb);
6092
6093                 skb->protocol = eth_type_trans(skb, tp->dev);
6094
6095                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6096                     skb->protocol != htons(ETH_P_8021Q)) {
6097                         dev_kfree_skb(skb);
6098                         goto drop_it_no_recycle;
6099                 }
6100
6101                 if (desc->type_flags & RXD_FLAG_VLAN &&
6102                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6103                         __vlan_hwaccel_put_tag(skb,
6104                                                desc->err_vlan & RXD_VLAN_MASK);
6105
6106                 napi_gro_receive(&tnapi->napi, skb);
6107
6108                 received++;
6109                 budget--;
6110
6111 next_pkt:
6112                 (*post_ptr)++;
6113
6114                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6115                         tpr->rx_std_prod_idx = std_prod_idx &
6116                                                tp->rx_std_ring_mask;
6117                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6118                                      tpr->rx_std_prod_idx);
6119                         work_mask &= ~RXD_OPAQUE_RING_STD;
6120                         rx_std_posted = 0;
6121                 }
6122 next_pkt_nopost:
6123                 sw_idx++;
6124                 sw_idx &= tp->rx_ret_ring_mask;
6125
6126                 /* Refresh hw_idx to see if there is new work */
6127                 if (sw_idx == hw_idx) {
6128                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6129                         rmb();
6130                 }
6131         }
6132
6133         /* ACK the status ring. */
6134         tnapi->rx_rcb_ptr = sw_idx;
6135         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6136
6137         /* Refill RX ring(s). */
6138         if (!tg3_flag(tp, ENABLE_RSS)) {
6139                 /* Sync BD data before updating mailbox */
6140                 wmb();
6141
6142                 if (work_mask & RXD_OPAQUE_RING_STD) {
6143                         tpr->rx_std_prod_idx = std_prod_idx &
6144                                                tp->rx_std_ring_mask;
6145                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6146                                      tpr->rx_std_prod_idx);
6147                 }
6148                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6149                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6150                                                tp->rx_jmb_ring_mask;
6151                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6152                                      tpr->rx_jmb_prod_idx);
6153                 }
6154                 mmiowb();
6155         } else if (work_mask) {
6156                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6157                  * updated before the producer indices can be updated.
6158                  */
6159                 smp_wmb();
6160
6161                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6162                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6163
6164                 if (tnapi != &tp->napi[1]) {
6165                         tp->rx_refill = true;
6166                         napi_schedule(&tp->napi[1].napi);
6167                 }
6168         }
6169
6170         return received;
6171 }
6172
6173 static void tg3_poll_link(struct tg3 *tp)
6174 {
6175         /* handle link change and other phy events */
6176         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6177                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6178
6179                 if (sblk->status & SD_STATUS_LINK_CHG) {
6180                         sblk->status = SD_STATUS_UPDATED |
6181                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6182                         spin_lock(&tp->lock);
6183                         if (tg3_flag(tp, USE_PHYLIB)) {
6184                                 tw32_f(MAC_STATUS,
6185                                      (MAC_STATUS_SYNC_CHANGED |
6186                                       MAC_STATUS_CFG_CHANGED |
6187                                       MAC_STATUS_MI_COMPLETION |
6188                                       MAC_STATUS_LNKSTATE_CHANGED));
6189                                 udelay(40);
6190                         } else
6191                                 tg3_setup_phy(tp, 0);
6192                         spin_unlock(&tp->lock);
6193                 }
6194         }
6195 }
6196
6197 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6198                                 struct tg3_rx_prodring_set *dpr,
6199                                 struct tg3_rx_prodring_set *spr)
6200 {
6201         u32 si, di, cpycnt, src_prod_idx;
6202         int i, err = 0;
6203
6204         while (1) {
6205                 src_prod_idx = spr->rx_std_prod_idx;
6206
6207                 /* Make sure updates to the rx_std_buffers[] entries and the
6208                  * standard producer index are seen in the correct order.
6209                  */
6210                 smp_rmb();
6211
6212                 if (spr->rx_std_cons_idx == src_prod_idx)
6213                         break;
6214
6215                 if (spr->rx_std_cons_idx < src_prod_idx)
6216                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6217                 else
6218                         cpycnt = tp->rx_std_ring_mask + 1 -
6219                                  spr->rx_std_cons_idx;
6220
6221                 cpycnt = min(cpycnt,
6222                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
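                /* Worked example, assuming a 512-entry standard ring
                 * (mask 511): with rx_std_cons_idx = 500 and
                 * src_prod_idx = 10 the producer has wrapped, so this
                 * pass copies 512 - 500 = 12 entries up to the end of
                 * the ring; the wrapped remainder is handled on the
                 * next loop iteration.
                 */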
6223
6224                 si = spr->rx_std_cons_idx;
6225                 di = dpr->rx_std_prod_idx;
6226
6227                 for (i = di; i < di + cpycnt; i++) {
6228                         if (dpr->rx_std_buffers[i].data) {
6229                                 cpycnt = i - di;
6230                                 err = -ENOSPC;
6231                                 break;
6232                         }
6233                 }
6234
6235                 if (!cpycnt)
6236                         break;
6237
6238                 /* Ensure that updates to the rx_std_buffers ring and the
6239                  * shadowed hardware producer ring from tg3_recycle_skb() are
6240                  * ordered correctly WRT the skb check above.
6241                  */
6242                 smp_rmb();
6243
6244                 memcpy(&dpr->rx_std_buffers[di],
6245                        &spr->rx_std_buffers[si],
6246                        cpycnt * sizeof(struct ring_info));
6247
6248                 for (i = 0; i < cpycnt; i++, di++, si++) {
6249                         struct tg3_rx_buffer_desc *sbd, *dbd;
6250                         sbd = &spr->rx_std[si];
6251                         dbd = &dpr->rx_std[di];
6252                         dbd->addr_hi = sbd->addr_hi;
6253                         dbd->addr_lo = sbd->addr_lo;
6254                 }
6255
6256                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6257                                        tp->rx_std_ring_mask;
6258                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6259                                        tp->rx_std_ring_mask;
6260         }
6261
6262         while (1) {
6263                 src_prod_idx = spr->rx_jmb_prod_idx;
6264
6265                 /* Make sure updates to the rx_jmb_buffers[] entries and
6266                  * the jumbo producer index are seen in the correct order.
6267                  */
6268                 smp_rmb();
6269
6270                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6271                         break;
6272
6273                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6274                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6275                 else
6276                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6277                                  spr->rx_jmb_cons_idx;
6278
6279                 cpycnt = min(cpycnt,
6280                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6281
6282                 si = spr->rx_jmb_cons_idx;
6283                 di = dpr->rx_jmb_prod_idx;
6284
6285                 for (i = di; i < di + cpycnt; i++) {
6286                         if (dpr->rx_jmb_buffers[i].data) {
6287                                 cpycnt = i - di;
6288                                 err = -ENOSPC;
6289                                 break;
6290                         }
6291                 }
6292
6293                 if (!cpycnt)
6294                         break;
6295
6296                 /* Ensure that updates to the rx_jmb_buffers ring and the
6297                  * shadowed hardware producer ring from tg3_recycle_skb() are
6298                  * ordered correctly WRT the skb check above.
6299                  */
6300                 smp_rmb();
6301
6302                 memcpy(&dpr->rx_jmb_buffers[di],
6303                        &spr->rx_jmb_buffers[si],
6304                        cpycnt * sizeof(struct ring_info));
6305
6306                 for (i = 0; i < cpycnt; i++, di++, si++) {
6307                         struct tg3_rx_buffer_desc *sbd, *dbd;
6308                         sbd = &spr->rx_jmb[si].std;
6309                         dbd = &dpr->rx_jmb[di].std;
6310                         dbd->addr_hi = sbd->addr_hi;
6311                         dbd->addr_lo = sbd->addr_lo;
6312                 }
6313
6314                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6315                                        tp->rx_jmb_ring_mask;
6316                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6317                                        tp->rx_jmb_ring_mask;
6318         }
6319
6320         return err;
6321 }
6322
6323 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6324 {
6325         struct tg3 *tp = tnapi->tp;
6326
6327         /* run TX completion thread */
6328         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6329                 tg3_tx(tnapi);
6330                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6331                         return work_done;
6332         }
6333
6334         if (!tnapi->rx_rcb_prod_idx)
6335                 return work_done;
6336
6337         /* run RX thread, within the bounds set by NAPI.
6338          * All RX "locking" is done by ensuring outside
6339          * code synchronizes with tg3->napi.poll()
6340          */
6341         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6342                 work_done += tg3_rx(tnapi, budget - work_done);
6343
6344         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6345                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6346                 int i, err = 0;
6347                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6348                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6349
6350                 tp->rx_refill = false;
6351                 for (i = 1; i <= tp->rxq_cnt; i++)
6352                         err |= tg3_rx_prodring_xfer(tp, dpr,
6353                                                     &tp->napi[i].prodring);
6354
6355                 wmb();
6356
6357                 if (std_prod_idx != dpr->rx_std_prod_idx)
6358                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6359                                      dpr->rx_std_prod_idx);
6360
6361                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6362                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6363                                      dpr->rx_jmb_prod_idx);
6364
6365                 mmiowb();
6366
6367                 if (err)
6368                         tw32_f(HOSTCC_MODE, tp->coal_now);
6369         }
6370
6371         return work_done;
6372 }
6373
6374 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6375 {
6376         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6377                 schedule_work(&tp->reset_task);
6378 }
6379
6380 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6381 {
6382         cancel_work_sync(&tp->reset_task);
6383         tg3_flag_clear(tp, RESET_TASK_PENDING);
6384         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6385 }
6386
6387 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6388 {
6389         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6390         struct tg3 *tp = tnapi->tp;
6391         int work_done = 0;
6392         struct tg3_hw_status *sblk = tnapi->hw_status;
6393
6394         while (1) {
6395                 work_done = tg3_poll_work(tnapi, work_done, budget);
6396
6397                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6398                         goto tx_recovery;
6399
6400                 if (unlikely(work_done >= budget))
6401                         break;
6402
6403                 /* tp->last_tag is used in tg3_int_reenable() below
6404                  * to tell the hw how much work has been processed,
6405                  * so we must read it before checking for more work.
6406                  */
6407                 tnapi->last_tag = sblk->status_tag;
6408                 tnapi->last_irq_tag = tnapi->last_tag;
6409                 rmb();
6410
6411                 /* check for RX/TX work to do */
6412                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6413                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6414
6415                         /* This test is not race-free, but looping
6416                          * again reduces the number of interrupts.
6417                          */
6418                         if (tnapi == &tp->napi[1] && tp->rx_refill)
6419                                 continue;
6420
6421                         napi_complete(napi);
6422                         /* Reenable interrupts. */
6423                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
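                        /* The shift places the tag in the upper byte of
                         * the mailbox word, which (per the tagged-status
                         * scheme) tells the hardware how far processing
                         * has advanced while re-enabling the vector.
                         */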
6424
6425                         /* This test here is synchronized by napi_schedule()
6426                          * and napi_complete() to close the race condition.
6427                          */
6428                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
6429                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
6430                                                   HOSTCC_MODE_ENABLE |
6431                                                   tnapi->coal_now);
6432                         }
6433                         mmiowb();
6434                         break;
6435                 }
6436         }
6437
6438         return work_done;
6439
6440 tx_recovery:
6441         /* work_done is guaranteed to be less than budget. */
6442         napi_complete(napi);
6443         tg3_reset_task_schedule(tp);
6444         return work_done;
6445 }
6446
6447 static void tg3_process_error(struct tg3 *tp)
6448 {
6449         u32 val;
6450         bool real_error = false;
6451
6452         if (tg3_flag(tp, ERROR_PROCESSED))
6453                 return;
6454
6455         /* Check Flow Attention register */
6456         val = tr32(HOSTCC_FLOW_ATTN);
6457         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6458                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6459                 real_error = true;
6460         }
6461
6462         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6463                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6464                 real_error = true;
6465         }
6466
6467         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6468                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6469                 real_error = true;
6470         }
6471
6472         if (!real_error)
6473                 return;
6474
6475         tg3_dump_state(tp);
6476
6477         tg3_flag_set(tp, ERROR_PROCESSED);
6478         tg3_reset_task_schedule(tp);
6479 }
6480
6481 static int tg3_poll(struct napi_struct *napi, int budget)
6482 {
6483         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6484         struct tg3 *tp = tnapi->tp;
6485         int work_done = 0;
6486         struct tg3_hw_status *sblk = tnapi->hw_status;
6487
6488         while (1) {
6489                 if (sblk->status & SD_STATUS_ERROR)
6490                         tg3_process_error(tp);
6491
6492                 tg3_poll_link(tp);
6493
6494                 work_done = tg3_poll_work(tnapi, work_done, budget);
6495
6496                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6497                         goto tx_recovery;
6498
6499                 if (unlikely(work_done >= budget))
6500                         break;
6501
6502                 if (tg3_flag(tp, TAGGED_STATUS)) {
6503                         /* tp->last_tag is used in tg3_int_reenable() below
6504                          * to tell the hw how much work has been processed,
6505                          * so we must read it before checking for more work.
6506                          */
6507                         tnapi->last_tag = sblk->status_tag;
6508                         tnapi->last_irq_tag = tnapi->last_tag;
6509                         rmb();
6510                 } else
6511                         sblk->status &= ~SD_STATUS_UPDATED;
6512
6513                 if (likely(!tg3_has_work(tnapi))) {
6514                         napi_complete(napi);
6515                         tg3_int_reenable(tnapi);
6516                         break;
6517                 }
6518         }
6519
6520         return work_done;
6521
6522 tx_recovery:
6523         /* work_done is guaranteed to be less than budget. */
6524         napi_complete(napi);
6525         tg3_reset_task_schedule(tp);
6526         return work_done;
6527 }
6528
6529 static void tg3_napi_disable(struct tg3 *tp)
6530 {
6531         int i;
6532
6533         for (i = tp->irq_cnt - 1; i >= 0; i--)
6534                 napi_disable(&tp->napi[i].napi);
6535 }
6536
6537 static void tg3_napi_enable(struct tg3 *tp)
6538 {
6539         int i;
6540
6541         for (i = 0; i < tp->irq_cnt; i++)
6542                 napi_enable(&tp->napi[i].napi);
6543 }
6544
6545 static void tg3_napi_init(struct tg3 *tp)
6546 {
6547         int i;
6548
6549         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6550         for (i = 1; i < tp->irq_cnt; i++)
6551                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6552 }
6553
6554 static void tg3_napi_fini(struct tg3 *tp)
6555 {
6556         int i;
6557
6558         for (i = 0; i < tp->irq_cnt; i++)
6559                 netif_napi_del(&tp->napi[i].napi);
6560 }
6561
6562 static inline void tg3_netif_stop(struct tg3 *tp)
6563 {
6564         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6565         tg3_napi_disable(tp);
6566         netif_carrier_off(tp->dev);
6567         netif_tx_disable(tp->dev);
6568 }
6569
6570 /* tp->lock must be held */
6571 static inline void tg3_netif_start(struct tg3 *tp)
6572 {
6573         tg3_ptp_resume(tp);
6574
6575         /* NOTE: unconditional netif_tx_wake_all_queues is only
6576          * appropriate so long as all callers are assured to
6577          * have free tx slots (such as after tg3_init_hw)
6578          */
6579         netif_tx_wake_all_queues(tp->dev);
6580
6581         if (tp->link_up)
6582                 netif_carrier_on(tp->dev);
6583
6584         tg3_napi_enable(tp);
6585         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6586         tg3_enable_ints(tp);
6587 }
6588
6589 static void tg3_irq_quiesce(struct tg3 *tp)
6590 {
6591         int i;
6592
6593         BUG_ON(tp->irq_sync);
6594
6595         tp->irq_sync = 1;
6596         smp_mb();
6597
6598         for (i = 0; i < tp->irq_cnt; i++)
6599                 synchronize_irq(tp->napi[i].irq_vec);
6600 }
6601
6602 /* Fully shutdown all tg3 driver activity elsewhere in the system.
6603  * If irq_sync is non-zero, then the IRQ handlers must be synchronized
6604  * as well.  Most of the time, this is not necessary except when
6605  * shutting down the device.
6606  */
6607 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6608 {
6609         spin_lock_bh(&tp->lock);
6610         if (irq_sync)
6611                 tg3_irq_quiesce(tp);
6612 }
6613
6614 static inline void tg3_full_unlock(struct tg3 *tp)
6615 {
6616         spin_unlock_bh(&tp->lock);
6617 }
6618
6619 /* One-shot MSI handler - Chip automatically disables interrupt
6620  * after sending MSI so driver doesn't have to do it.
6621  */
6622 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6623 {
6624         struct tg3_napi *tnapi = dev_id;
6625         struct tg3 *tp = tnapi->tp;
6626
6627         prefetch(tnapi->hw_status);
6628         if (tnapi->rx_rcb)
6629                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6630
6631         if (likely(!tg3_irq_sync(tp)))
6632                 napi_schedule(&tnapi->napi);
6633
6634         return IRQ_HANDLED;
6635 }
6636
6637 /* MSI ISR - No need to check for interrupt sharing and no need to
6638  * flush status block and interrupt mailbox. PCI ordering rules
6639  * guarantee that MSI will arrive after the status block.
6640  */
6641 static irqreturn_t tg3_msi(int irq, void *dev_id)
6642 {
6643         struct tg3_napi *tnapi = dev_id;
6644         struct tg3 *tp = tnapi->tp;
6645
6646         prefetch(tnapi->hw_status);
6647         if (tnapi->rx_rcb)
6648                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6649         /*
6650          * Writing any value to intr-mbox-0 clears PCI INTA# and
6651          * chip-internal interrupt pending events.
6652          * Writing non-zero to intr-mbox-0 additionally tells the
6653          * NIC to stop sending us irqs, engaging "in-intr-handler"
6654          * event coalescing.
6655          */
6656         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6657         if (likely(!tg3_irq_sync(tp)))
6658                 napi_schedule(&tnapi->napi);
6659
6660         return IRQ_RETVAL(1);
6661 }
6662
6663 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6664 {
6665         struct tg3_napi *tnapi = dev_id;
6666         struct tg3 *tp = tnapi->tp;
6667         struct tg3_hw_status *sblk = tnapi->hw_status;
6668         unsigned int handled = 1;
6669
6670         /* In INTx mode, it is possible for the interrupt to arrive at
6671          * the CPU before the status block posted prior to it becomes visible.
6672          * Reading the PCI State register will confirm whether the
6673          * interrupt is ours and will flush the status block.
6674          */
6675         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6676                 if (tg3_flag(tp, CHIP_RESETTING) ||
6677                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6678                         handled = 0;
6679                         goto out;
6680                 }
6681         }
6682
6683         /*
6684          * Writing any value to intr-mbox-0 clears PCI INTA# and
6685          * chip-internal interrupt pending events.
6686          * Writing non-zero to intr-mbox-0 additionally tells the
6687          * NIC to stop sending us irqs, engaging "in-intr-handler"
6688          * event coalescing.
6689          *
6690          * Flush the mailbox to de-assert the IRQ immediately to prevent
6691          * spurious interrupts.  The flush impacts performance but
6692          * excessive spurious interrupts can be worse in some cases.
6693          */
6694         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6695         if (tg3_irq_sync(tp))
6696                 goto out;
6697         sblk->status &= ~SD_STATUS_UPDATED;
6698         if (likely(tg3_has_work(tnapi))) {
6699                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6700                 napi_schedule(&tnapi->napi);
6701         } else {
6702                 /* No work, shared interrupt perhaps?  re-enable
6703                  * interrupts, and flush that PCI write
6704                  */
6705                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6706                                0x00000000);
6707         }
6708 out:
6709         return IRQ_RETVAL(handled);
6710 }
6711
6712 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6713 {
6714         struct tg3_napi *tnapi = dev_id;
6715         struct tg3 *tp = tnapi->tp;
6716         struct tg3_hw_status *sblk = tnapi->hw_status;
6717         unsigned int handled = 1;
6718
6719         /* In INTx mode, it is possible for the interrupt to arrive at
6720          * the CPU before the status block posted prior to it becomes visible.
6721          * Reading the PCI State register will confirm whether the
6722          * interrupt is ours and will flush the status block.
6723          */
6724         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6725                 if (tg3_flag(tp, CHIP_RESETTING) ||
6726                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6727                         handled = 0;
6728                         goto out;
6729                 }
6730         }
6731
6732         /*
6733          * Writing any value to intr-mbox-0 clears PCI INTA# and
6734          * chip-internal interrupt pending events.
6735          * Writing non-zero to intr-mbox-0 additionally tells the
6736          * NIC to stop sending us irqs, engaging "in-intr-handler"
6737          * event coalescing.
6738          *
6739          * Flush the mailbox to de-assert the IRQ immediately to prevent
6740          * spurious interrupts.  The flush impacts performance but
6741          * excessive spurious interrupts can be worse in some cases.
6742          */
6743         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6744
6745         /*
6746          * In a shared interrupt configuration, sometimes other devices'
6747          * interrupts will scream.  We record the current status tag here
6748          * so that the above check can report that the screaming interrupts
6749          * are unhandled.  Eventually they will be silenced.
6750          */
6751         tnapi->last_irq_tag = sblk->status_tag;
6752
6753         if (tg3_irq_sync(tp))
6754                 goto out;
6755
6756         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6757
6758         napi_schedule(&tnapi->napi);
6759
6760 out:
6761         return IRQ_RETVAL(handled);
6762 }
6763
6764 /* ISR for interrupt test */
6765 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6766 {
6767         struct tg3_napi *tnapi = dev_id;
6768         struct tg3 *tp = tnapi->tp;
6769         struct tg3_hw_status *sblk = tnapi->hw_status;
6770
6771         if ((sblk->status & SD_STATUS_UPDATED) ||
6772             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6773                 tg3_disable_ints(tp);
6774                 return IRQ_RETVAL(1);
6775         }
6776         return IRQ_RETVAL(0);
6777 }
6778
6779 #ifdef CONFIG_NET_POLL_CONTROLLER
6780 static void tg3_poll_controller(struct net_device *dev)
6781 {
6782         int i;
6783         struct tg3 *tp = netdev_priv(dev);
6784
6785         for (i = 0; i < tp->irq_cnt; i++)
6786                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6787 }
6788 #endif
6789
6790 static void tg3_tx_timeout(struct net_device *dev)
6791 {
6792         struct tg3 *tp = netdev_priv(dev);
6793
6794         if (netif_msg_tx_err(tp)) {
6795                 netdev_err(dev, "transmit timed out, resetting\n");
6796                 tg3_dump_state(tp);
6797         }
6798
6799         tg3_reset_task_schedule(tp);
6800 }
6801
6802 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
6803 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6804 {
6805         u32 base = (u32) mapping & 0xffffffff;
6806
6807         return (base > 0xffffdcc0) && (base + len + 8 < base);
6808 }
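/* Example with hypothetical values: base = 0xffffff00 and len = 0x200
 * give base + len + 8 = 0x100000108, which truncates to 0x108 in 32
 * bits; 0x108 < base, so the buffer straddles the 4GB boundary and the
 * test fires.  The base > 0xffffdcc0 guard cheaply skips buffers that
 * start more than about 9 KB below the boundary.
 */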
6809
6810 /* Test for DMA addresses > 40-bit */
6811 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6812                                           int len)
6813 {
6814 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6815         if (tg3_flag(tp, 40BIT_DMA_BUG))
6816                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6817         return 0;
6818 #else
6819         return 0;
6820 #endif
6821 }
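/* DMA_BIT_MASK(40) is 0xffffffffff (2^40 - 1); a mapping whose end
 * would cross that limit trips the 40-bit addressing erratum, and the
 * skb is then bounced through tigon3_dma_hwbug_workaround() below.
 */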
6822
6823 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6824                                  dma_addr_t mapping, u32 len, u32 flags,
6825                                  u32 mss, u32 vlan)
6826 {
6827         txbd->addr_hi = ((u64) mapping >> 32);
6828         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6829         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6830         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6831 }
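/* Layout illustration, with field positions as defined in tg3.h: len
 * occupies the upper 16 bits of len_flags and the flag bits the lower
 * 16, so e.g. len = 0x05ea with flags = TXD_FLAG_END (0x0004) packs to
 * len_flags = 0x05ea0004; mss and vlan share vlan_tag the same way.
 */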
6832
6833 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6834                             dma_addr_t map, u32 len, u32 flags,
6835                             u32 mss, u32 vlan)
6836 {
6837         struct tg3 *tp = tnapi->tp;
6838         bool hwbug = false;
6839
6840         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6841                 hwbug = true;
6842
6843         if (tg3_4g_overflow_test(map, len))
6844                 hwbug = true;
6845
6846         if (tg3_40bit_overflow_test(tp, map, len))
6847                 hwbug = true;
6848
6849         if (tp->dma_limit) {
6850                 u32 prvidx = *entry;
6851                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6852                 while (len > tp->dma_limit && *budget) {
6853                         u32 frag_len = tp->dma_limit;
6854                         len -= tp->dma_limit;
6855
6856                          /* Avoid the 8-byte DMA problem */
6857                         if (len <= 8) {
6858                                 len += tp->dma_limit / 2;
6859                                 frag_len = tp->dma_limit / 2;
6860                         }
6861
6862                         tnapi->tx_buffers[*entry].fragmented = true;
6863
6864                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6865                                       frag_len, tmp_flag, mss, vlan);
6866                         *budget -= 1;
6867                         prvidx = *entry;
6868                         *entry = NEXT_TX(*entry);
6869
6870                         map += frag_len;
6871                 }
6872
6873                 if (len) {
6874                         if (*budget) {
6875                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6876                                               len, flags, mss, vlan);
6877                                 *budget -= 1;
6878                                 *entry = NEXT_TX(*entry);
6879                         } else {
6880                                 hwbug = true;
6881                                 tnapi->tx_buffers[prvidx].fragmented = false;
6882                         }
6883                 }
6884         } else {
6885                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6886                               len, flags, mss, vlan);
6887                 *entry = NEXT_TX(*entry);
6888         }
6889
6890         return hwbug;
6891 }
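/* Worked example of the split logic above, hypothetical numbers: with
 * dma_limit = 4096 and len = 4100, a full first chunk would leave a
 * 4-byte tail and trip the 8-byte erratum, so the loop emits a
 * 2048-byte chunk instead, leaving 2052 bytes to go out as the final
 * descriptor.
 */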
6892
6893 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6894 {
6895         int i;
6896         struct sk_buff *skb;
6897         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6898
6899         skb = txb->skb;
6900         txb->skb = NULL;
6901
6902         pci_unmap_single(tnapi->tp->pdev,
6903                          dma_unmap_addr(txb, mapping),
6904                          skb_headlen(skb),
6905                          PCI_DMA_TODEVICE);
6906
6907         while (txb->fragmented) {
6908                 txb->fragmented = false;
6909                 entry = NEXT_TX(entry);
6910                 txb = &tnapi->tx_buffers[entry];
6911         }
6912
6913         for (i = 0; i <= last; i++) {
6914                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6915
6916                 entry = NEXT_TX(entry);
6917                 txb = &tnapi->tx_buffers[entry];
6918
6919                 pci_unmap_page(tnapi->tp->pdev,
6920                                dma_unmap_addr(txb, mapping),
6921                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6922
6923                 while (txb->fragmented) {
6924                         txb->fragmented = false;
6925                         entry = NEXT_TX(entry);
6926                         txb = &tnapi->tx_buffers[entry];
6927                 }
6928         }
6929 }
6930
6931 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6932 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6933                                        struct sk_buff **pskb,
6934                                        u32 *entry, u32 *budget,
6935                                        u32 base_flags, u32 mss, u32 vlan)
6936 {
6937         struct tg3 *tp = tnapi->tp;
6938         struct sk_buff *new_skb, *skb = *pskb;
6939         dma_addr_t new_addr = 0;
6940         int ret = 0;
6941
6942         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6943                 new_skb = skb_copy(skb, GFP_ATOMIC);
6944         else {
6945                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6946
6947                 new_skb = skb_copy_expand(skb,
6948                                           skb_headroom(skb) + more_headroom,
6949                                           skb_tailroom(skb), GFP_ATOMIC);
6950         }
6951
6952         if (!new_skb) {
6953                 ret = -1;
6954         } else {
6955                 /* New SKB is guaranteed to be linear. */
6956                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6957                                           PCI_DMA_TODEVICE);
6958                 /* Make sure the mapping succeeded */
6959                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6960                         dev_kfree_skb(new_skb);
6961                         ret = -1;
6962                 } else {
6963                         u32 save_entry = *entry;
6964
6965                         base_flags |= TXD_FLAG_END;
6966
6967                         tnapi->tx_buffers[*entry].skb = new_skb;
6968                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6969                                            mapping, new_addr);
6970
6971                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6972                                             new_skb->len, base_flags,
6973                                             mss, vlan)) {
6974                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6975                                 dev_kfree_skb(new_skb);
6976                                 ret = -1;
6977                         }
6978                 }
6979         }
6980
6981         dev_kfree_skb(skb);
6982         *pskb = new_skb;
6983         return ret;
6984 }
6985
6986 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6987
6988 /* Use GSO to work around a rare TSO bug that may be triggered when the
6989  * TSO header is greater than 80 bytes.
6990  */
6991 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6992 {
6993         struct sk_buff *segs, *nskb;
6994         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6995
6996         /* Estimate the number of fragments in the worst case */
6997         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6998                 netif_stop_queue(tp->dev);
6999
7000                 /* netif_tx_stop_queue() must be done before checking
7001                   * tx index in tg3_tx_avail() below, because in
7002                  * tg3_tx(), we update tx index before checking for
7003                  * netif_tx_queue_stopped().
7004                  */
7005                 smp_mb();
7006                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7007                         return NETDEV_TX_BUSY;
7008
7009                 netif_wake_queue(tp->dev);
7010         }
7011
7012         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7013         if (IS_ERR(segs))
7014                 goto tg3_tso_bug_end;
7015
7016         do {
7017                 nskb = segs;
7018                 segs = segs->next;
7019                 nskb->next = NULL;
7020                 tg3_start_xmit(nskb, tp->dev);
7021         } while (segs);
7022
7023 tg3_tso_bug_end:
7024         dev_kfree_skb(skb);
7025
7026         return NETDEV_TX_OK;
7027 }
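/* The gso_segs * 3 estimate above appears to budget roughly three
 * descriptors per resegmented skb (header data plus split payload
 * mappings); treat the exact factor as a queue-space heuristic rather
 * than a hardware-derived bound.
 */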
7028
7029 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7030  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7031  */
7032 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7033 {
7034         struct tg3 *tp = netdev_priv(dev);
7035         u32 len, entry, base_flags, mss, vlan = 0;
7036         u32 budget;
7037         int i = -1, would_hit_hwbug;
7038         dma_addr_t mapping;
7039         struct tg3_napi *tnapi;
7040         struct netdev_queue *txq;
7041         unsigned int last;
7042
7043         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7044         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7045         if (tg3_flag(tp, ENABLE_TSS))
7046                 tnapi++;
7047
7048         budget = tg3_tx_avail(tnapi);
7049
7050         /* We are running in BH-disabled context with netif_tx_lock
7051          * and TX reclaim runs via tp->napi.poll inside of a software
7052          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7053          * no IRQ context deadlocks to worry about either.  Rejoice!
7054          */
7055         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7056                 if (!netif_tx_queue_stopped(txq)) {
7057                         netif_tx_stop_queue(txq);
7058
7059                         /* This is a hard error, log it. */
7060                         netdev_err(dev,
7061                                    "BUG! Tx Ring full when queue awake!\n");
7062                 }
7063                 return NETDEV_TX_BUSY;
7064         }
7065
7066         entry = tnapi->tx_prod;
7067         base_flags = 0;
7068         if (skb->ip_summed == CHECKSUM_PARTIAL)
7069                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7070
7071         mss = skb_shinfo(skb)->gso_size;
7072         if (mss) {
7073                 struct iphdr *iph;
7074                 u32 tcp_opt_len, hdr_len;
7075
7076                 if (skb_header_cloned(skb) &&
7077                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7078                         goto drop;
7079
7080                 iph = ip_hdr(skb);
7081                 tcp_opt_len = tcp_optlen(skb);
7082
7083                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7084
7085                 if (!skb_is_gso_v6(skb)) {
7086                         iph->check = 0;
7087                         iph->tot_len = htons(mss + hdr_len);
7088                 }
7089
7090                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7091                     tg3_flag(tp, TSO_BUG))
7092                         return tg3_tso_bug(tp, skb);
7093
7094                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7095                                TXD_FLAG_CPU_POST_DMA);
7096
7097                 if (tg3_flag(tp, HW_TSO_1) ||
7098                     tg3_flag(tp, HW_TSO_2) ||
7099                     tg3_flag(tp, HW_TSO_3)) {
7100                         tcp_hdr(skb)->check = 0;
7101                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7102                 } else
7103                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7104                                                                  iph->daddr, 0,
7105                                                                  IPPROTO_TCP,
7106                                                                  0);
7107
7108                 if (tg3_flag(tp, HW_TSO_3)) {
7109                         mss |= (hdr_len & 0xc) << 12;
7110                         if (hdr_len & 0x10)
7111                                 base_flags |= 0x00000010;
7112                         base_flags |= (hdr_len & 0x3e0) << 5;
7113                 } else if (tg3_flag(tp, HW_TSO_2))
7114                         mss |= hdr_len << 9;
7115                 else if (tg3_flag(tp, HW_TSO_1) ||
7116                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7117                         if (tcp_opt_len || iph->ihl > 5) {
7118                                 int tsflags;
7119
7120                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7121                                 mss |= (tsflags << 11);
7122                         }
7123                 } else {
7124                         if (tcp_opt_len || iph->ihl > 5) {
7125                                 int tsflags;
7126
7127                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7128                                 base_flags |= tsflags << 12;
7129                         }
7130                 }
7131         }
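        /* Worked example for the HW_TSO_3 bit scattering above (added
         * note, not from the original source): a 20-byte IPv4 header
         * plus a 20-byte TCP header gives hdr_len = 40 = 0x28.  Then
         * (0x28 & 0xc) << 12 = 0x8000 folds into mss, 0x28 & 0x10 = 0
         * leaves the extra base_flags bit clear, and (0x28 & 0x3e0) << 5
         * = 0x400 folds into base_flags, letting the hardware reassemble
         * a header length wider than the mss field alone could carry.
         */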
7132
7133         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7134             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7135                 base_flags |= TXD_FLAG_JMB_PKT;
7136
7137         if (vlan_tx_tag_present(skb)) {
7138                 base_flags |= TXD_FLAG_VLAN;
7139                 vlan = vlan_tx_tag_get(skb);
7140         }
7141
7142         len = skb_headlen(skb);
7143
7144         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7145         if (pci_dma_mapping_error(tp->pdev, mapping))
7146                 goto drop;
7147
7148
7149         tnapi->tx_buffers[entry].skb = skb;
7150         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7151
7152         would_hit_hwbug = 0;
7153
7154         if (tg3_flag(tp, 5701_DMA_BUG))
7155                 would_hit_hwbug = 1;
7156
7157         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7158                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7159                             mss, vlan)) {
7160                 would_hit_hwbug = 1;
7161         } else if (skb_shinfo(skb)->nr_frags > 0) {
7162                 u32 tmp_mss = mss;
7163
7164                 if (!tg3_flag(tp, HW_TSO_1) &&
7165                     !tg3_flag(tp, HW_TSO_2) &&
7166                     !tg3_flag(tp, HW_TSO_3))
7167                         tmp_mss = 0;
7168
7169                 /* Now loop through additional data
7170                  * fragments, and queue them.
7171                  */
7172                 last = skb_shinfo(skb)->nr_frags - 1;
7173                 for (i = 0; i <= last; i++) {
7174                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7175
7176                         len = skb_frag_size(frag);
7177                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7178                                                    len, DMA_TO_DEVICE);
7179
7180                         tnapi->tx_buffers[entry].skb = NULL;
7181                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7182                                            mapping);
7183                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7184                                 goto dma_error;
7185
7186                         if (!budget ||
7187                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7188                                             len, base_flags |
7189                                             ((i == last) ? TXD_FLAG_END : 0),
7190                                             tmp_mss, vlan)) {
7191                                 would_hit_hwbug = 1;
7192                                 break;
7193                         }
7194                 }
7195         }
7196
7197         if (would_hit_hwbug) {
7198                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7199
7200                 /* If the workaround fails due to memory/mapping
7201                  * failure, silently drop this packet.
7202                  */
7203                 entry = tnapi->tx_prod;
7204                 budget = tg3_tx_avail(tnapi);
7205                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7206                                                 base_flags, mss, vlan))
7207                         goto drop_nofree;
7208         }
7209
7210         skb_tx_timestamp(skb);
7211         netdev_tx_sent_queue(txq, skb->len);
7212
7213         /* Sync BD data before updating mailbox */
7214         wmb();
7215
7216         /* Packets are ready, update Tx producer idx local and on card. */
7217         tw32_tx_mbox(tnapi->prodmbox, entry);
7218
7219         tnapi->tx_prod = entry;
7220         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7221                 netif_tx_stop_queue(txq);
7222
7223                 /* netif_tx_stop_queue() must be done before checking
7224                  * tx index in tg3_tx_avail() below, because in
7225                  * tg3_tx(), we update tx index before checking for
7226                  * netif_tx_queue_stopped().
7227                  */
7228                 smp_mb();
7229                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7230                         netif_tx_wake_queue(txq);
7231         }
7232
7233         mmiowb();
7234         return NETDEV_TX_OK;
7235
7236 dma_error:
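        /* Descriptive note: we arrive here when mapping fragment i
         * failed, so release the head mapping plus fragments 0 .. i-1
         * (hence the --i; tg3_tx_skb_unmap()'s last argument is the
         * index of the last fragment to unmap, with -1 meaning the
         * head mapping only).
         */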
7237         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7238         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7239 drop:
7240         dev_kfree_skb(skb);
7241 drop_nofree:
7242         tp->tx_dropped++;
7243         return NETDEV_TX_OK;
7244 }
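/* A minimal sketch (not part of the original file) of the barrier
 * pairing that the stop-queue comments in tg3_start_xmit() and
 * tg3_tso_bug() rely on; tg3_tx() is the reclaim side:
 *
 *      producer (xmit)                 consumer (tx reclaim)
 *      ---------------                 ---------------------
 *      netif_tx_stop_queue(txq);       tnapi->tx_cons = new_cons;
 *      smp_mb();                       smp_mb();
 *      if (tg3_tx_avail() > thresh)    if (queue stopped &&
 *              netif_tx_wake_queue();      tg3_tx_avail() > thresh)
 *                                              netif_tx_wake_queue();
 *
 * Without both barriers, the stop could race with the final reclaim
 * and leave the queue stopped forever.
 */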
7245
7246 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7247 {
7248         if (enable) {
7249                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7250                                   MAC_MODE_PORT_MODE_MASK);
7251
7252                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7253
7254                 if (!tg3_flag(tp, 5705_PLUS))
7255                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7256
7257                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7258                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7259                 else
7260                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7261         } else {
7262                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7263
7264                 if (tg3_flag(tp, 5705_PLUS) ||
7265                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7266                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7267                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7268         }
7269
7270         tw32(MAC_MODE, tp->mac_mode);
7271         udelay(40);
7272 }
7273
7274 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7275 {
7276         u32 val, bmcr, mac_mode, ptest = 0;
7277
7278         tg3_phy_toggle_apd(tp, false);
7279         tg3_phy_toggle_automdix(tp, 0);
7280
7281         if (extlpbk && tg3_phy_set_extloopbk(tp))
7282                 return -EIO;
7283
7284         bmcr = BMCR_FULLDPLX;
7285         switch (speed) {
7286         case SPEED_10:
7287                 break;
7288         case SPEED_100:
7289                 bmcr |= BMCR_SPEED100;
7290                 break;
7291         case SPEED_1000:
7292         default:
7293                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7294                         speed = SPEED_100;
7295                         bmcr |= BMCR_SPEED100;
7296                 } else {
7297                         speed = SPEED_1000;
7298                         bmcr |= BMCR_SPEED1000;
7299                 }
7300         }
7301
7302         if (extlpbk) {
7303                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7304                         tg3_readphy(tp, MII_CTRL1000, &val);
7305                         val |= CTL1000_AS_MASTER |
7306                                CTL1000_ENABLE_MASTER;
7307                         tg3_writephy(tp, MII_CTRL1000, val);
7308                 } else {
7309                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7310                                 MII_TG3_FET_PTEST_TRIM_2;
7311                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7312                 }
7313         } else
7314                 bmcr |= BMCR_LOOPBACK;
7315
7316         tg3_writephy(tp, MII_BMCR, bmcr);
7317
7318         /* The write needs to be flushed for the FETs */
7319         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7320                 tg3_readphy(tp, MII_BMCR, &bmcr);
7321
7322         udelay(40);
7323
7324         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7325             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7326                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7327                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7328                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7329
7330                 /* The write needs to be flushed for the AC131 */
7331                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7332         }
7333
7334         /* Reset to prevent losing 1st rx packet intermittently */
7335         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7336             tg3_flag(tp, 5780_CLASS)) {
7337                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7338                 udelay(10);
7339                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7340         }
7341
7342         mac_mode = tp->mac_mode &
7343                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7344         if (speed == SPEED_1000)
7345                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7346         else
7347                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7348
7349         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7350                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7351
7352                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7353                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7354                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7355                         mac_mode |= MAC_MODE_LINK_POLARITY;
7356
7357                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7358                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7359         }
7360
7361         tw32(MAC_MODE, mac_mode);
7362         udelay(40);
7363
7364         return 0;
7365 }
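/* Recap of the sequence above (descriptive note): disable APD and auto
 * MDI crossover, force speed/duplex via BMCR (or the FET PTEST trim
 * registers for external loopback), force tx link/lock for 5785 FET
 * phys, reset the rx MAC on 5780-class serdes parts, and finally set
 * the MAC port mode (GMII/MII) to match the forced speed.
 */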
7366
7367 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7368 {
7369         struct tg3 *tp = netdev_priv(dev);
7370
7371         if (features & NETIF_F_LOOPBACK) {
7372                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7373                         return;
7374
7375                 spin_lock_bh(&tp->lock);
7376                 tg3_mac_loopback(tp, true);
7377                 netif_carrier_on(tp->dev);
7378                 spin_unlock_bh(&tp->lock);
7379                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7380         } else {
7381                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7382                         return;
7383
7384                 spin_lock_bh(&tp->lock);
7385                 tg3_mac_loopback(tp, false);
7386                 /* Force link status check */
7387                 tg3_setup_phy(tp, 1);
7388                 spin_unlock_bh(&tp->lock);
7389                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7390         }
7391 }
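/* Usage note (assumed, not in the original source): NETIF_F_LOOPBACK is
 * normally toggled from userspace through ethtool's feature interface,
 * e.g. "ethtool -K eth0 loopback on" (eth0 being a placeholder), which
 * reaches this code via ndo_set_features() -> tg3_set_features().
 */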
7392
7393 static netdev_features_t tg3_fix_features(struct net_device *dev,
7394         netdev_features_t features)
7395 {
7396         struct tg3 *tp = netdev_priv(dev);
7397
7398         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7399                 features &= ~NETIF_F_ALL_TSO;
7400
7401         return features;
7402 }
7403
7404 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7405 {
7406         netdev_features_t changed = dev->features ^ features;
7407
7408         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7409                 tg3_set_loopback(dev, features);
7410
7411         return 0;
7412 }
7413
7414 static void tg3_rx_prodring_free(struct tg3 *tp,
7415                                  struct tg3_rx_prodring_set *tpr)
7416 {
7417         int i;
7418
7419         if (tpr != &tp->napi[0].prodring) {
7420                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7421                      i = (i + 1) & tp->rx_std_ring_mask)
7422                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7423                                         tp->rx_pkt_map_sz);
7424
7425                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7426                         for (i = tpr->rx_jmb_cons_idx;
7427                              i != tpr->rx_jmb_prod_idx;
7428                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7429                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7430                                                 TG3_RX_JMB_MAP_SZ);
7431                         }
7432                 }
7433
7434                 return;
7435         }
7436
7437         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7438                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7439                                 tp->rx_pkt_map_sz);
7440
7441         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7442                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7443                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7444                                         TG3_RX_JMB_MAP_SZ);
7445         }
7446 }
7447
7448 /* Initialize rx rings for packet processing.
7449  *
7450  * The chip has been shut down and the driver detached from
7451  * the networking stack, so no interrupts or new tx packets will
7452  * end up in the driver.  tp->{tx,}lock are held and thus
7453  * we may not sleep.
7454  */
7455 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7456                                  struct tg3_rx_prodring_set *tpr)
7457 {
7458         u32 i, rx_pkt_dma_sz;
7459
7460         tpr->rx_std_cons_idx = 0;
7461         tpr->rx_std_prod_idx = 0;
7462         tpr->rx_jmb_cons_idx = 0;
7463         tpr->rx_jmb_prod_idx = 0;
7464
7465         if (tpr != &tp->napi[0].prodring) {
7466                 memset(&tpr->rx_std_buffers[0], 0,
7467                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7468                 if (tpr->rx_jmb_buffers)
7469                         memset(&tpr->rx_jmb_buffers[0], 0,
7470                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7471                 goto done;
7472         }
7473
7474         /* Zero out all descriptors. */
7475         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7476
7477         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7478         if (tg3_flag(tp, 5780_CLASS) &&
7479             tp->dev->mtu > ETH_DATA_LEN)
7480                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7481         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7482
7483         /* Initialize invariants of the rings, we only set this
7484          * stuff once.  This works because the card does not
7485          * write into the rx buffer posting rings.
7486          */
7487         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7488                 struct tg3_rx_buffer_desc *rxd;
7489
7490                 rxd = &tpr->rx_std[i];
7491                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7492                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7493                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7494                                (i << RXD_OPAQUE_INDEX_SHIFT));
7495         }
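        /* The opaque cookie written above comes back verbatim in rx
         * completion descriptors, so the completion path can recover
         * the ring and buffer index without extra lookup state.  A
         * sketch of the decode (mask names assumed, see tg3.h for the
         * real definitions):
         *
         *      ring = desc->opaque & RXD_OPAQUE_RING_MASK;
         *      idx  = desc->opaque & RXD_OPAQUE_INDEX_MASK;
         */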
7496
7497         /* Now allocate fresh rx data buffers for the standard ring. */
7498         for (i = 0; i < tp->rx_pending; i++) {
7499                 unsigned int frag_size;
7500
7501                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7502                                       &frag_size) < 0) {
7503                         netdev_warn(tp->dev,
7504                                     "Using a smaller RX standard ring. Only "
7505                                     "%d out of %d buffers were allocated "
7506                                     "successfully\n", i, tp->rx_pending);
7507                         if (i == 0)
7508                                 goto initfail;
7509                         tp->rx_pending = i;
7510                         break;
7511                 }
7512         }
7513
7514         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7515                 goto done;
7516
7517         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7518
7519         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7520                 goto done;
7521
7522         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7523                 struct tg3_rx_buffer_desc *rxd;
7524
7525                 rxd = &tpr->rx_jmb[i].std;
7526                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7527                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7528                                   RXD_FLAG_JUMBO;
7529                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7530                        (i << RXD_OPAQUE_INDEX_SHIFT));
7531         }
7532
7533         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7534                 unsigned int frag_size;
7535
7536                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7537                                       &frag_size) < 0) {
7538                         netdev_warn(tp->dev,
7539                                     "Using a smaller RX jumbo ring. Only %d "
7540                                     "out of %d buffers were allocated "
7541                                     "successfully\n", i, tp->rx_jumbo_pending);
7542                         if (i == 0)
7543                                 goto initfail;
7544                         tp->rx_jumbo_pending = i;
7545                         break;
7546                 }
7547         }
7548
7549 done:
7550         return 0;
7551
7552 initfail:
7553         tg3_rx_prodring_free(tp, tpr);
7554         return -ENOMEM;
7555 }
7556
7557 static void tg3_rx_prodring_fini(struct tg3 *tp,
7558                                  struct tg3_rx_prodring_set *tpr)
7559 {
7560         kfree(tpr->rx_std_buffers);
7561         tpr->rx_std_buffers = NULL;
7562         kfree(tpr->rx_jmb_buffers);
7563         tpr->rx_jmb_buffers = NULL;
7564         if (tpr->rx_std) {
7565                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7566                                   tpr->rx_std, tpr->rx_std_mapping);
7567                 tpr->rx_std = NULL;
7568         }
7569         if (tpr->rx_jmb) {
7570                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7571                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7572                 tpr->rx_jmb = NULL;
7573         }
7574 }
7575
7576 static int tg3_rx_prodring_init(struct tg3 *tp,
7577                                 struct tg3_rx_prodring_set *tpr)
7578 {
7579         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7580                                       GFP_KERNEL);
7581         if (!tpr->rx_std_buffers)
7582                 return -ENOMEM;
7583
7584         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7585                                          TG3_RX_STD_RING_BYTES(tp),
7586                                          &tpr->rx_std_mapping,
7587                                          GFP_KERNEL);
7588         if (!tpr->rx_std)
7589                 goto err_out;
7590
7591         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7592                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7593                                               GFP_KERNEL);
7594                 if (!tpr->rx_jmb_buffers)
7595                         goto err_out;
7596
7597                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7598                                                  TG3_RX_JMB_RING_BYTES(tp),
7599                                                  &tpr->rx_jmb_mapping,
7600                                                  GFP_KERNEL);
7601                 if (!tpr->rx_jmb)
7602                         goto err_out;
7603         }
7604
7605         return 0;
7606
7607 err_out:
7608         tg3_rx_prodring_fini(tp, tpr);
7609         return -ENOMEM;
7610 }
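/* The allocation pattern above is a common two-tier ring setup: a
 * kzalloc()ed shadow array keeps the driver's per-buffer bookkeeping,
 * while dma_alloc_coherent() provides the descriptor memory the NIC
 * reads directly.  A generic sketch of the idiom (names hypothetical):
 *
 *      ring->meta = kzalloc(n * sizeof(*ring->meta), GFP_KERNEL);
 *      ring->desc = dma_alloc_coherent(dev, n * sizeof(*ring->desc),
 *                                      &ring->desc_dma, GFP_KERNEL);
 *      if (!ring->meta || !ring->desc)
 *              goto err_out;           (unwind whichever half succeeded)
 */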
7611
7612 /* Free up pending packets in all rx/tx rings.
7613  *
7614  * The chip has been shut down and the driver detached from
7615  * the networking stack, so no interrupts or new tx packets will
7616  * end up in the driver.  tp->{tx,}lock is not held and we are not
7617  * in an interrupt context and thus may sleep.
7618  */
7619 static void tg3_free_rings(struct tg3 *tp)
7620 {
7621         int i, j;
7622
7623         for (j = 0; j < tp->irq_cnt; j++) {
7624                 struct tg3_napi *tnapi = &tp->napi[j];
7625
7626                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7627
7628                 if (!tnapi->tx_buffers)
7629                         continue;
7630
7631                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7632                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7633
7634                         if (!skb)
7635                                 continue;
7636
7637                         tg3_tx_skb_unmap(tnapi, i,
7638                                          skb_shinfo(skb)->nr_frags - 1);
7639
7640                         dev_kfree_skb_any(skb);
7641                 }
7642                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7643         }
7644 }
7645
7646 /* Initialize tx/rx rings for packet processing.
7647  *
7648  * The chip has been shut down and the driver detached from
7649  * the networking stack, so no interrupts or new tx packets will
7650  * end up in the driver.  tp->{tx,}lock are held and thus
7651  * we may not sleep.
7652  */
7653 static int tg3_init_rings(struct tg3 *tp)
7654 {
7655         int i;
7656
7657         /* Free up all the SKBs. */
7658         tg3_free_rings(tp);
7659
7660         for (i = 0; i < tp->irq_cnt; i++) {
7661                 struct tg3_napi *tnapi = &tp->napi[i];
7662
7663                 tnapi->last_tag = 0;
7664                 tnapi->last_irq_tag = 0;
7665                 tnapi->hw_status->status = 0;
7666                 tnapi->hw_status->status_tag = 0;
7667                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7668
7669                 tnapi->tx_prod = 0;
7670                 tnapi->tx_cons = 0;
7671                 if (tnapi->tx_ring)
7672                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7673
7674                 tnapi->rx_rcb_ptr = 0;
7675                 if (tnapi->rx_rcb)
7676                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7677
7678                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7679                         tg3_free_rings(tp);
7680                         return -ENOMEM;
7681                 }
7682         }
7683
7684         return 0;
7685 }
7686
7687 static void tg3_mem_tx_release(struct tg3 *tp)
7688 {
7689         int i;
7690
7691         for (i = 0; i < tp->irq_max; i++) {
7692                 struct tg3_napi *tnapi = &tp->napi[i];
7693
7694                 if (tnapi->tx_ring) {
7695                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7696                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7697                         tnapi->tx_ring = NULL;
7698                 }
7699
7700                 kfree(tnapi->tx_buffers);
7701                 tnapi->tx_buffers = NULL;
7702         }
7703 }
7704
7705 static int tg3_mem_tx_acquire(struct tg3 *tp)
7706 {
7707         int i;
7708         struct tg3_napi *tnapi = &tp->napi[0];
7709
7710         /* If multivector TSS is enabled, vector 0 does not handle
7711          * tx interrupts.  Don't allocate any resources for it.
7712          */
7713         if (tg3_flag(tp, ENABLE_TSS))
7714                 tnapi++;
7715
7716         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
7717                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
7718                                             TG3_TX_RING_SIZE, GFP_KERNEL);
7719                 if (!tnapi->tx_buffers)
7720                         goto err_out;
7721
7722                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7723                                                     TG3_TX_RING_BYTES,
7724                                                     &tnapi->tx_desc_mapping,
7725                                                     GFP_KERNEL);
7726                 if (!tnapi->tx_ring)
7727                         goto err_out;
7728         }
7729
7730         return 0;
7731
7732 err_out:
7733         tg3_mem_tx_release(tp);
7734         return -ENOMEM;
7735 }
7736
7737 static void tg3_mem_rx_release(struct tg3 *tp)
7738 {
7739         int i;
7740
7741         for (i = 0; i < tp->irq_max; i++) {
7742                 struct tg3_napi *tnapi = &tp->napi[i];
7743
7744                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7745
7746                 if (!tnapi->rx_rcb)
7747                         continue;
7748
7749                 dma_free_coherent(&tp->pdev->dev,
7750                                   TG3_RX_RCB_RING_BYTES(tp),
7751                                   tnapi->rx_rcb,
7752                                   tnapi->rx_rcb_mapping);
7753                 tnapi->rx_rcb = NULL;
7754         }
7755 }
7756
7757 static int tg3_mem_rx_acquire(struct tg3 *tp)
7758 {
7759         unsigned int i, limit;
7760
7761         limit = tp->rxq_cnt;
7762
7763         /* If RSS is enabled, we need a (dummy) producer ring
7764          * set on vector zero.  This is the true hw prodring.
7765          */
7766         if (tg3_flag(tp, ENABLE_RSS))
7767                 limit++;
7768
7769         for (i = 0; i < limit; i++) {
7770                 struct tg3_napi *tnapi = &tp->napi[i];
7771
7772                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7773                         goto err_out;
7774
7775                 /* If multivector RSS is enabled, vector 0
7776                  * does not handle rx or tx interrupts.
7777                  * Don't allocate any resources for it.
7778                  */
7779                 if (!i && tg3_flag(tp, ENABLE_RSS))
7780                         continue;
7781
7782                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7783                                                    TG3_RX_RCB_RING_BYTES(tp),
7784                                                    &tnapi->rx_rcb_mapping,
7785                                                    GFP_KERNEL);
7786                 if (!tnapi->rx_rcb)
7787                         goto err_out;
7788
7789                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7790         }
7791
7792         return 0;
7793
7794 err_out:
7795         tg3_mem_rx_release(tp);
7796         return -ENOMEM;
7797 }
7798
7799 /*
7800  * Must not be invoked with interrupt sources disabled and
7801  * the hardware shut down.
7802  */
7803 static void tg3_free_consistent(struct tg3 *tp)
7804 {
7805         int i;
7806
7807         for (i = 0; i < tp->irq_cnt; i++) {
7808                 struct tg3_napi *tnapi = &tp->napi[i];
7809
7810                 if (tnapi->hw_status) {
7811                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7812                                           tnapi->hw_status,
7813                                           tnapi->status_mapping);
7814                         tnapi->hw_status = NULL;
7815                 }
7816         }
7817
7818         tg3_mem_rx_release(tp);
7819         tg3_mem_tx_release(tp);
7820
7821         if (tp->hw_stats) {
7822                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7823                                   tp->hw_stats, tp->stats_mapping);
7824                 tp->hw_stats = NULL;
7825         }
7826 }
7827
7828 /*
7829  * Must not be invoked with interrupt sources disabled and
7830  * the hardware shut down.  Can sleep.
7831  */
7832 static int tg3_alloc_consistent(struct tg3 *tp)
7833 {
7834         int i;
7835
7836         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7837                                           sizeof(struct tg3_hw_stats),
7838                                           &tp->stats_mapping,
7839                                           GFP_KERNEL);
7840         if (!tp->hw_stats)
7841                 goto err_out;
7842
7843         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7844
7845         for (i = 0; i < tp->irq_cnt; i++) {
7846                 struct tg3_napi *tnapi = &tp->napi[i];
7847                 struct tg3_hw_status *sblk;
7848
7849                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7850                                                       TG3_HW_STATUS_SIZE,
7851                                                       &tnapi->status_mapping,
7852                                                       GFP_KERNEL);
7853                 if (!tnapi->hw_status)
7854                         goto err_out;
7855
7856                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7857                 sblk = tnapi->hw_status;
7858
7859                 if (tg3_flag(tp, ENABLE_RSS)) {
7860                         u16 *prodptr = NULL;
7861
7862                         /*
7863                          * When RSS is enabled, the status block format changes
7864                          * slightly.  The "rx_jumbo_consumer", "reserved",
7865                          * and "rx_mini_consumer" members get mapped to the
7866                          * other three rx return ring producer indexes.
7867                          */
7868                         switch (i) {
7869                         case 1:
7870                                 prodptr = &sblk->idx[0].rx_producer;
7871                                 break;
7872                         case 2:
7873                                 prodptr = &sblk->rx_jumbo_consumer;
7874                                 break;
7875                         case 3:
7876                                 prodptr = &sblk->reserved;
7877                                 break;
7878                         case 4:
7879                                 prodptr = &sblk->rx_mini_consumer;
7880                                 break;
7881                         }
7882                         tnapi->rx_rcb_prod_idx = prodptr;
7883                 } else {
7884                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7885                 }
7886         }
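        /* Summary of the RSS status-block remapping above, taken
         * straight from the switch statement:
         *
         *      vector 1 -> sblk->idx[0].rx_producer
         *      vector 2 -> sblk->rx_jumbo_consumer
         *      vector 3 -> sblk->reserved
         *      vector 4 -> sblk->rx_mini_consumer
         *
         * Fields that multivector mode leaves unused double as producer
         * indexes for the extra rx return rings.
         */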
7887
7888         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
7889                 goto err_out;
7890
7891         return 0;
7892
7893 err_out:
7894         tg3_free_consistent(tp);
7895         return -ENOMEM;
7896 }
7897
7898 #define MAX_WAIT_CNT 1000
7899
7900 /* To stop a block, clear the enable bit and poll till it
7901  * clears.  tp->lock is held.
7902  */
7903 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7904 {
7905         unsigned int i;
7906         u32 val;
7907
7908         if (tg3_flag(tp, 5705_PLUS)) {
7909                 switch (ofs) {
7910                 case RCVLSC_MODE:
7911                 case DMAC_MODE:
7912                 case MBFREE_MODE:
7913                 case BUFMGR_MODE:
7914                 case MEMARB_MODE:
7915                         /* We can't enable/disable these bits of the
7916                          * 5705/5750, just say success.
7917                          */
7918                         return 0;
7919
7920                 default:
7921                         break;
7922                 }
7923         }
7924
7925         val = tr32(ofs);
7926         val &= ~enable_bit;
7927         tw32_f(ofs, val);
7928
7929         for (i = 0; i < MAX_WAIT_CNT; i++) {
7930                 udelay(100);
7931                 val = tr32(ofs);
7932                 if ((val & enable_bit) == 0)
7933                         break;
7934         }
7935
7936         if (i == MAX_WAIT_CNT && !silent) {
7937                 dev_err(&tp->pdev->dev,
7938                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7939                         ofs, enable_bit);
7940                 return -ENODEV;
7941         }
7942
7943         return 0;
7944 }
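/* Note on the poll budget above: MAX_WAIT_CNT iterations of udelay(100)
 * give each block up to 1000 * 100us = 100ms to quiesce before
 * tg3_stop_block() gives up and returns -ENODEV.
 */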
7945
7946 /* tp->lock is held. */
7947 static int tg3_abort_hw(struct tg3 *tp, int silent)
7948 {
7949         int i, err;
7950
7951         tg3_disable_ints(tp);
7952
7953         tp->rx_mode &= ~RX_MODE_ENABLE;
7954         tw32_f(MAC_RX_MODE, tp->rx_mode);
7955         udelay(10);
7956
7957         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7958         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7959         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7960         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7961         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7962         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7963
7964         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7965         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7966         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7967         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7968         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7969         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7970         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7971
7972         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7973         tw32_f(MAC_MODE, tp->mac_mode);
7974         udelay(40);
7975
7976         tp->tx_mode &= ~TX_MODE_ENABLE;
7977         tw32_f(MAC_TX_MODE, tp->tx_mode);
7978
7979         for (i = 0; i < MAX_WAIT_CNT; i++) {
7980                 udelay(100);
7981                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7982                         break;
7983         }
7984         if (i >= MAX_WAIT_CNT) {
7985                 dev_err(&tp->pdev->dev,
7986                         "%s timed out, TX_MODE_ENABLE will not clear "
7987                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7988                 err |= -ENODEV;
7989         }
7990
7991         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7992         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7993         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7994
7995         tw32(FTQ_RESET, 0xffffffff);
7996         tw32(FTQ_RESET, 0x00000000);
7997
7998         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7999         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8000
8001         for (i = 0; i < tp->irq_cnt; i++) {
8002                 struct tg3_napi *tnapi = &tp->napi[i];
8003                 if (tnapi->hw_status)
8004                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8005         }
8006
8007         return err;
8008 }
8009
8010 /* Save PCI command register before chip reset */
8011 static void tg3_save_pci_state(struct tg3 *tp)
8012 {
8013         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8014 }
8015
8016 /* Restore PCI state after chip reset */
8017 static void tg3_restore_pci_state(struct tg3 *tp)
8018 {
8019         u32 val;
8020
8021         /* Re-enable indirect register accesses. */
8022         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8023                                tp->misc_host_ctrl);
8024
8025         /* Set MAX PCI retry to zero. */
8026         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8027         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8028             tg3_flag(tp, PCIX_MODE))
8029                 val |= PCISTATE_RETRY_SAME_DMA;
8030         /* Allow reads and writes to the APE register and memory space. */
8031         if (tg3_flag(tp, ENABLE_APE))
8032                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8033                        PCISTATE_ALLOW_APE_SHMEM_WR |
8034                        PCISTATE_ALLOW_APE_PSPACE_WR;
8035         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8036
8037         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8038
8039         if (!tg3_flag(tp, PCI_EXPRESS)) {
8040                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8041                                       tp->pci_cacheline_sz);
8042                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8043                                       tp->pci_lat_timer);
8044         }
8045
8046         /* Make sure PCI-X relaxed ordering bit is clear. */
8047         if (tg3_flag(tp, PCIX_MODE)) {
8048                 u16 pcix_cmd;
8049
8050                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8051                                      &pcix_cmd);
8052                 pcix_cmd &= ~PCI_X_CMD_ERO;
8053                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8054                                       pcix_cmd);
8055         }
8056
8057         if (tg3_flag(tp, 5780_CLASS)) {
8058
8059                 /* Chip reset on 5780 will reset MSI enable bit,
8060                  * so we need to restore it.
8061                  */
8062                 if (tg3_flag(tp, USING_MSI)) {
8063                         u16 ctrl;
8064
8065                         pci_read_config_word(tp->pdev,
8066                                              tp->msi_cap + PCI_MSI_FLAGS,
8067                                              &ctrl);
8068                         pci_write_config_word(tp->pdev,
8069                                               tp->msi_cap + PCI_MSI_FLAGS,
8070                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8071                         val = tr32(MSGINT_MODE);
8072                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8073                 }
8074         }
8075 }
8076
8077 /* tp->lock is held. */
8078 static int tg3_chip_reset(struct tg3 *tp)
8079 {
8080         u32 val;
8081         void (*write_op)(struct tg3 *, u32, u32);
8082         int i, err;
8083
8084         tg3_nvram_lock(tp);
8085
8086         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8087
8088         /* No matching tg3_nvram_unlock() after this because
8089          * chip reset below will undo the nvram lock.
8090          */
8091         tp->nvram_lock_cnt = 0;
8092
8093         /* GRC_MISC_CFG core clock reset will clear the memory
8094          * enable bit in PCI register 4 and the MSI enable bit
8095          * on some chips, so we save relevant registers here.
8096          */
8097         tg3_save_pci_state(tp);
8098
8099         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8100             tg3_flag(tp, 5755_PLUS))
8101                 tw32(GRC_FASTBOOT_PC, 0);
8102
8103         /*
8104          * We must avoid the readl() that normally takes place.
8105          * It locks machines, causes machine checks, and other
8106          * fun things.  So, temporarily disable the 5701
8107          * hardware workaround, while we do the reset.
8108          */
8109         write_op = tp->write32;
8110         if (write_op == tg3_write_flush_reg32)
8111                 tp->write32 = tg3_write32;
8112
8113         /* Prevent the irq handler from reading or writing PCI registers
8114          * during chip reset when the memory enable bit in the PCI command
8115          * register may be cleared.  The chip does not generate interrupt
8116          * at this time, but the irq handler may still be called due to irq
8117          * sharing or irqpoll.
8118          */
8119         tg3_flag_set(tp, CHIP_RESETTING);
8120         for (i = 0; i < tp->irq_cnt; i++) {
8121                 struct tg3_napi *tnapi = &tp->napi[i];
8122                 if (tnapi->hw_status) {
8123                         tnapi->hw_status->status = 0;
8124                         tnapi->hw_status->status_tag = 0;
8125                 }
8126                 tnapi->last_tag = 0;
8127                 tnapi->last_irq_tag = 0;
8128         }
8129         smp_mb();
8130
8131         for (i = 0; i < tp->irq_cnt; i++)
8132                 synchronize_irq(tp->napi[i].irq_vec);
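        /* Descriptive note: with CHIP_RESETTING set and the status
         * blocks cleared, synchronize_irq() waits out any handler still
         * running on another CPU, so past this loop no handler can act
         * on stale status data or touch registers mid-reset.
         */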
8133
8134         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8135                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8136                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8137         }
8138
8139         /* do the reset */
8140         val = GRC_MISC_CFG_CORECLK_RESET;
8141
8142         if (tg3_flag(tp, PCI_EXPRESS)) {
8143                 /* Force PCIe 1.0a mode */
8144                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8145                     !tg3_flag(tp, 57765_PLUS) &&
8146                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8147                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8148                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8149
8150                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
8151                         tw32(GRC_MISC_CFG, (1 << 29));
8152                         val |= (1 << 29);
8153                 }
8154         }
8155
8156         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
8157                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8158                 tw32(GRC_VCPU_EXT_CTRL,
8159                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8160         }
8161
8162         /* Manage gphy power for all CPMU absent PCIe devices. */
8163         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8164                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8165
8166         tw32(GRC_MISC_CFG, val);
8167
8168         /* restore 5701 hardware bug workaround write method */
8169         tp->write32 = write_op;
8170
8171         /* Unfortunately, we have to delay before the PCI read back.
8172          * Some 575X chips even will not respond to a PCI cfg access
8173          * when the reset command is given to the chip.
8174          *
8175          * How do these hardware designers expect things to work
8176          * properly if the PCI write is posted for a long period
8177          * of time?  It is always necessary to have some method by
8178          * which a register read back can occur to push the write
8179          * out which does the reset.
8180          *
8181          * For most tg3 variants the trick below was working.
8182          * Ho hum...
8183          */
8184         udelay(120);
8185
8186         /* Flush PCI posted writes.  The normal MMIO registers
8187          * are inaccessible at this time so this is the only
8188          * way to do this reliably (actually, this is no longer
8189          * the case, see above).  I tried to use indirect
8190          * register read/write but this upset some 5701 variants.
8191          */
8192         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8193
8194         udelay(120);
8195
8196         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8197                 u16 val16;
8198
8199                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
8200                         int j;
8201                         u32 cfg_val;
8202
8203                         /* Wait for link training to complete.  */
8204                         for (j = 0; j < 5000; j++)
8205                                 udelay(100);
8206
8207                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8208                         pci_write_config_dword(tp->pdev, 0xc4,
8209                                                cfg_val | (1 << 15));
8210                 }
8211
8212                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8213                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8214                 /*
8215                  * Older PCIe devices only support the 128 byte
8216                  * MPS setting.  Enforce the restriction.
8217                  */
8218                 if (!tg3_flag(tp, CPMU_PRESENT))
8219                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8220                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8221
8222                 /* Clear error status */
8223                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8224                                       PCI_EXP_DEVSTA_CED |
8225                                       PCI_EXP_DEVSTA_NFED |
8226                                       PCI_EXP_DEVSTA_FED |
8227                                       PCI_EXP_DEVSTA_URD);
8228         }
8229
8230         tg3_restore_pci_state(tp);
8231
8232         tg3_flag_clear(tp, CHIP_RESETTING);
8233         tg3_flag_clear(tp, ERROR_PROCESSED);
8234
8235         val = 0;
8236         if (tg3_flag(tp, 5780_CLASS))
8237                 val = tr32(MEMARB_MODE);
8238         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8239
8240         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
8241                 tg3_stop_fw(tp);
8242                 tw32(0x5000, 0x400);
8243         }
8244
8245         tw32(GRC_MODE, tp->grc_mode);
8246
8247         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
8248                 val = tr32(0xc4);
8249
8250                 tw32(0xc4, val | (1 << 15));
8251         }
8252
8253         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8254             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8255                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8256                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
8257                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
8258                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8259         }
8260
8261         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8262                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
8263                 val = tp->mac_mode;
8264         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8265                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
8266                 val = tp->mac_mode;
8267         } else
8268                 val = 0;
8269
8270         tw32_f(MAC_MODE, val);
8271         udelay(40);
8272
8273         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8274
8275         err = tg3_poll_fw(tp);
8276         if (err)
8277                 return err;
8278
8279         tg3_mdio_start(tp);
8280
8281         if (tg3_flag(tp, PCI_EXPRESS) &&
8282             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
8283             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
8284             !tg3_flag(tp, 57765_PLUS)) {
8285                 val = tr32(0x7c00);
8286
8287                 tw32(0x7c00, val | (1 << 25));
8288         }
8289
8290         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8291                 val = tr32(TG3_CPMU_CLCK_ORIDE);
8292                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
8293         }
8294
8295         /* Reprobe ASF enable state.  */
8296         tg3_flag_clear(tp, ENABLE_ASF);
8297         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
8298         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
8299         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
8300                 u32 nic_cfg;
8301
8302                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
8303                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8304                         tg3_flag_set(tp, ENABLE_ASF);
8305                         tp->last_event_jiffies = jiffies;
8306                         if (tg3_flag(tp, 5750_PLUS))
8307                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8308                 }
8309         }
8310
8311         return 0;
8312 }
8313
8314 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8315 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8316
8317 /* tp->lock is held. */
8318 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8319 {
8320         int err;
8321
8322         tg3_stop_fw(tp);
8323
8324         tg3_write_sig_pre_reset(tp, kind);
8325
8326         tg3_abort_hw(tp, silent);
8327         err = tg3_chip_reset(tp);
8328
8329         __tg3_set_mac_addr(tp, 0);
8330
8331         tg3_write_sig_legacy(tp, kind);
8332         tg3_write_sig_post_reset(tp, kind);
8333
8334         if (tp->hw_stats) {
8335                 /* Save the stats across chip resets... */
8336                 tg3_get_nstats(tp, &tp->net_stats_prev);
8337                 tg3_get_estats(tp, &tp->estats_prev);
8338
8339                 /* And make sure the next sample is new data */
8340                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8341         }
8342
8343         if (err)
8344                 return err;
8345
8346         return 0;
8347 }
8348
8349 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8350 {
8351         struct tg3 *tp = netdev_priv(dev);
8352         struct sockaddr *addr = p;
8353         int err = 0, skip_mac_1 = 0;
8354
8355         if (!is_valid_ether_addr(addr->sa_data))
8356                 return -EADDRNOTAVAIL;
8357
8358         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8359
8360         if (!netif_running(dev))
8361                 return 0;
8362
8363         if (tg3_flag(tp, ENABLE_ASF)) {
8364                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8365
8366                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8367                 addr0_low = tr32(MAC_ADDR_0_LOW);
8368                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8369                 addr1_low = tr32(MAC_ADDR_1_LOW);
8370
8371                 /* Skip MAC addr 1 if ASF is using it. */
8372                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8373                     !(addr1_high == 0 && addr1_low == 0))
8374                         skip_mac_1 = 1;
8375         }
8376         spin_lock_bh(&tp->lock);
8377         __tg3_set_mac_addr(tp, skip_mac_1);
8378         spin_unlock_bh(&tp->lock);
8379
8380         return err;
8381 }
8382
8383 /* tp->lock is held. */
8384 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8385                            dma_addr_t mapping, u32 maxlen_flags,
8386                            u32 nic_addr)
8387 {
8388         tg3_write_mem(tp,
8389                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8390                       ((u64) mapping >> 32));
8391         tg3_write_mem(tp,
8392                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8393                       ((u64) mapping & 0xffffffff));
8394         tg3_write_mem(tp,
8395                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8396                        maxlen_flags);
8397
8398         if (!tg3_flag(tp, 5705_PLUS))
8399                 tg3_write_mem(tp,
8400                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8401                               nic_addr);
8402 }
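/* Worked example (illustrative): for mapping == 0x123456000, the two
 * host-address writes above store 0x00000001 (the upper 32 bits) and
 * 0x23456000 (the lower 32 bits), so a DMA address wider than 32 bits
 * survives the split across the two NIC-memory words.
 */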
8403
8404
8405 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8406 {
8407         int i = 0;
8408
8409         if (!tg3_flag(tp, ENABLE_TSS)) {
8410                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8411                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8412                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8413         } else {
8414                 tw32(HOSTCC_TXCOL_TICKS, 0);
8415                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8416                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8417
8418                 for (; i < tp->txq_cnt; i++) {
8419                         u32 reg;
8420
8421                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8422                         tw32(reg, ec->tx_coalesce_usecs);
8423                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8424                         tw32(reg, ec->tx_max_coalesced_frames);
8425                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8426                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8427                 }
8428         }
8429
8430         for (; i < tp->irq_max - 1; i++) {
8431                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8432                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8433                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8434         }
8435 }
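/* The per-vector host coalescing registers sit in 0x18-byte blocks, so
 * the loops above address tx vector i at (illustrative arithmetic):
 *
 *      HOSTCC_TXCOL_TICKS_VEC1     + i * 0x18
 *      HOSTCC_TXMAX_FRAMES_VEC1    + i * 0x18
 *      HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18
 *
 * tg3_coal_rx_init() below walks the matching rx blocks the same way.
 */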
8436
8437 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
8438 {
8439         int i = 0;
8440         u32 limit = tp->rxq_cnt;
8441
8442         if (!tg3_flag(tp, ENABLE_RSS)) {
8443                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8444                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8445                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8446                 limit--;
8447         } else {
8448                 tw32(HOSTCC_RXCOL_TICKS, 0);
8449                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8450                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8451         }
8452
8453         for (; i < limit; i++) {
8454                 u32 reg;
8455
8456                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8457                 tw32(reg, ec->rx_coalesce_usecs);
8458                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8459                 tw32(reg, ec->rx_max_coalesced_frames);
8460                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8461                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8462         }
8463
8464         for (; i < tp->irq_max - 1; i++) {
8465                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8466                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8467                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8468         }
8469 }
8470
8471 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8472 {
8473         tg3_coal_tx_init(tp, ec);
8474         tg3_coal_rx_init(tp, ec);
8475
8476         if (!tg3_flag(tp, 5705_PLUS)) {
8477                 u32 val = ec->stats_block_coalesce_usecs;
8478
8479                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8480                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8481
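                     /* Writing 0 to HOSTCC_STAT_COAL_TICKS stops the
                      * periodic statistics block updates while the link
                      * is down.
                      */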
8482                 if (!tp->link_up)
8483                         val = 0;
8484
8485                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8486         }
8487 }
8488
8489 /* tp->lock is held. */
8490 static void tg3_rings_reset(struct tg3 *tp)
8491 {
8492         int i;
8493         u32 stblk, txrcb, rxrcb, limit;
8494         struct tg3_napi *tnapi = &tp->napi[0];
8495
8496         /* Disable all transmit rings but the first. */
8497         if (!tg3_flag(tp, 5705_PLUS))
8498                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8499         else if (tg3_flag(tp, 5717_PLUS))
8500                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8501         else if (tg3_flag(tp, 57765_CLASS))
8502                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8503         else
8504                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8505
8506         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8507              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8508                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8509                               BDINFO_FLAGS_DISABLED);
8510
8512         /* Disable all receive return rings but the first. */
8513         if (tg3_flag(tp, 5717_PLUS))
8514                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8515         else if (!tg3_flag(tp, 5705_PLUS))
8516                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8517         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8518                  tg3_flag(tp, 57765_CLASS))
8519                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8520         else
8521                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8522
8523         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8524              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8525                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8526                               BDINFO_FLAGS_DISABLED);
8527
8528         /* Disable interrupts: writing 1 to the interrupt mailbox masks
              * the vector.
              */
8529         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8530         tp->napi[0].chk_msi_cnt = 0;
8531         tp->napi[0].last_rx_cons = 0;
8532         tp->napi[0].last_tx_cons = 0;
8533
8534         /* Zero mailbox registers. */
8535         if (tg3_flag(tp, SUPPORT_MSIX)) {
8536                 for (i = 1; i < tp->irq_max; i++) {
8537                         tp->napi[i].tx_prod = 0;
8538                         tp->napi[i].tx_cons = 0;
8539                         if (tg3_flag(tp, ENABLE_TSS))
8540                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8541                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8542                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8543                         tp->napi[i].chk_msi_cnt = 0;
8544                         tp->napi[i].last_rx_cons = 0;
8545                         tp->napi[i].last_tx_cons = 0;
8546                 }
8547                 if (!tg3_flag(tp, ENABLE_TSS))
8548                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8549         } else {
8550                 tp->napi[0].tx_prod = 0;
8551                 tp->napi[0].tx_cons = 0;
8552                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8553                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8554         }
8555
8556         /* Make sure the NIC-based send BD rings are disabled. */
8557         if (!tg3_flag(tp, 5705_PLUS)) {
8558                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8559                 for (i = 0; i < 16; i++)
8560                         tw32_tx_mbox(mbox + i * 8, 0);
8561         }
8562
8563         txrcb = NIC_SRAM_SEND_RCB;
8564         rxrcb = NIC_SRAM_RCV_RET_RCB;
8565
8566         /* Clear status block in ram. */
8567         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8568
8569         /* Set status block DMA address */
8570         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8571              ((u64) tnapi->status_mapping >> 32));
8572         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8573              ((u64) tnapi->status_mapping & 0xffffffff));
8574
8575         if (tnapi->tx_ring) {
8576                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8577                                (TG3_TX_RING_SIZE <<
8578                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8579                                NIC_SRAM_TX_BUFFER_DESC);
8580                 txrcb += TG3_BDINFO_SIZE;
8581         }
8582
8583         if (tnapi->rx_rcb) {
8584                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8585                                (tp->rx_ret_ring_mask + 1) <<
8586                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8587                 rxrcb += TG3_BDINFO_SIZE;
8588         }
8589
8590         stblk = HOSTCC_STATBLCK_RING1;
8591
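          /* Program the status block addresses and BD control blocks for
           * the remaining vectors; vector 0 was set up above.
           */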
8592         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8593                 u64 mapping = (u64)tnapi->status_mapping;
8594                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8595                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8596
8597                 /* Clear status block in ram. */
8598                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8599
8600                 if (tnapi->tx_ring) {
8601                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8602                                        (TG3_TX_RING_SIZE <<
8603                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8604                                        NIC_SRAM_TX_BUFFER_DESC);
8605                         txrcb += TG3_BDINFO_SIZE;
8606                 }
8607
8608                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8609                                ((tp->rx_ret_ring_mask + 1) <<
8610                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8611
8612                 stblk += 8;
8613                 rxrcb += TG3_BDINFO_SIZE;
8614         }
8615 }
8616
8617 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8618 {
8619         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8620
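          /* Pick the per-chip size of the on-chip standard RX BD cache;
           * the replenish thresholds below are derived from it.
           */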
8621         if (!tg3_flag(tp, 5750_PLUS) ||
8622             tg3_flag(tp, 5780_CLASS) ||
8623             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8624             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8625             tg3_flag(tp, 57765_PLUS))
8626                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8627         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8628                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8629                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8630         else
8631                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8632
8633         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8634         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8635
8636         val = min(nic_rep_thresh, host_rep_thresh);
8637         tw32(RCVBDI_STD_THRESH, val);
8638
8639         if (tg3_flag(tp, 57765_PLUS))
8640                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8641
8642         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8643                 return;
8644
8645         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8646
8647         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8648
8649         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8650         tw32(RCVBDI_JUMBO_THRESH, val);
8651
8652         if (tg3_flag(tp, 57765_PLUS))
8653                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8654 }
8655
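     /* Bit-by-bit CRC-32 over 'buf' using the reflected Ethernet
      * polynomial 0xedb88320; the multicast hash below uses seven bits
      * of this value.
      */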
8656 static inline u32 calc_crc(unsigned char *buf, int len)
8657 {
8658         u32 reg;
8659         u32 tmp;
8660         int j, k;
8661
8662         reg = 0xffffffff;
8663
8664         for (j = 0; j < len; j++) {
8665                 reg ^= buf[j];
8666
8667                 for (k = 0; k < 8; k++) {
8668                         tmp = reg & 0x01;
8669
8670                         reg >>= 1;
8671
8672                         if (tmp)
8673                                 reg ^= 0xedb88320;
8674                 }
8675         }
8676
8677         return ~reg;
8678 }
8679
8680 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8681 {
8682         /* Accept or reject all multicast frames. */
8683         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8684         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8685         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8686         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8687 }
8688
8689 static void __tg3_set_rx_mode(struct net_device *dev)
8690 {
8691         struct tg3 *tp = netdev_priv(dev);
8692         u32 rx_mode;
8693
8694         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8695                                   RX_MODE_KEEP_VLAN_TAG);
8696
8697 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8698         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8699          * flag clear.
8700          */
8701         if (!tg3_flag(tp, ENABLE_ASF))
8702                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8703 #endif
8704
8705         if (dev->flags & IFF_PROMISC) {
8706                 /* Promiscuous mode. */
8707                 rx_mode |= RX_MODE_PROMISC;
8708         } else if (dev->flags & IFF_ALLMULTI) {
8709                 /* Accept all multicast. */
8710                 tg3_set_multi(tp, 1);
8711         } else if (netdev_mc_empty(dev)) {
8712                 /* Reject all multicast. */
8713                 tg3_set_multi(tp, 0);
8714         } else {
8715                 /* Accept one or more multicast(s). */
8716                 struct netdev_hw_addr *ha;
8717                 u32 mc_filter[4] = { 0, };
8718                 u32 regidx;
8719                 u32 bit;
8720                 u32 crc;
8721
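                     /* Hash each address on 7 bits of its CRC: bits 6:5
                      * select one of the four hash registers, bits 4:0
                      * the bit within it.
                      */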
8722                 netdev_for_each_mc_addr(ha, dev) {
8723                         crc = calc_crc(ha->addr, ETH_ALEN);
8724                         bit = ~crc & 0x7f;
8725                         regidx = (bit & 0x60) >> 5;
8726                         bit &= 0x1f;
8727                         mc_filter[regidx] |= (1 << bit);
8728                 }
8729
8730                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8731                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8732                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8733                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8734         }
8735
8736         if (rx_mode != tp->rx_mode) {
8737                 tp->rx_mode = rx_mode;
8738                 tw32_f(MAC_RX_MODE, rx_mode);
8739                 udelay(10);
8740         }
8741 }
8742
8743 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
8744 {
8745         int i;
8746
8747         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8748                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
8749 }
8750
8751 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8752 {
8753         int i;
8754
8755         if (!tg3_flag(tp, SUPPORT_MSIX))
8756                 return;
8757
8758         if (tp->rxq_cnt == 1) {
8759                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8760                 return;
8761         }
8762
8763         /* Validate the table against the current RX queue count. */
8764         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8765                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
8766                         break;
8767         }
8768
8769         if (i != TG3_RSS_INDIR_TBL_SIZE)
8770                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
8771 }
8772
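     /* Write the RSS indirection table to the hardware, packing eight
      * 4-bit queue indices into each 32-bit register with the first
      * entry in the most significant nibble.
      */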
8773 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8774 {
8775         int i = 0;
8776         u32 reg = MAC_RSS_INDIR_TBL_0;
8777
8778         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8779                 u32 val = tp->rss_ind_tbl[i];
8780                 i++;
8781                 for (; i % 8; i++) {
8782                         val <<= 4;
8783                         val |= tp->rss_ind_tbl[i];
8784                 }
8785                 tw32(reg, val);
8786                 reg += 4;
8787         }
8788 }
8789
8790 /* tp->lock is held. */
8791 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8792 {
8793         u32 val, rdmac_mode;
8794         int i, err, limit;
8795         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8796
8797         tg3_disable_ints(tp);
8798
8799         tg3_stop_fw(tp);
8800
8801         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8802
8803         if (tg3_flag(tp, INIT_COMPLETE))
8804                 tg3_abort_hw(tp, 1);
8805
8806         /* Enable MAC control of LPI */
8807         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8808                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8809                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8810                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8811
8812                 tw32_f(TG3_CPMU_EEE_CTRL,
8813                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8814
8815                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8816                       TG3_CPMU_EEEMD_LPI_IN_TX |
8817                       TG3_CPMU_EEEMD_LPI_IN_RX |
8818                       TG3_CPMU_EEEMD_EEE_ENABLE;
8819
8820                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8821                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8822
8823                 if (tg3_flag(tp, ENABLE_APE))
8824                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8825
8826                 tw32_f(TG3_CPMU_EEE_MODE, val);
8827
8828                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8829                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8830                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8831
8832                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8833                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8834                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8835         }
8836
8837         if (reset_phy)
8838                 tg3_phy_reset(tp);
8839
8840         err = tg3_chip_reset(tp);
8841         if (err)
8842                 return err;
8843
8844         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8845
8846         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8847                 val = tr32(TG3_CPMU_CTRL);
8848                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8849                 tw32(TG3_CPMU_CTRL, val);
8850
8851                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8852                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8853                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8854                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8855
8856                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8857                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8858                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8859                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8860
8861                 val = tr32(TG3_CPMU_HST_ACC);
8862                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8863                 val |= CPMU_HST_ACC_MACCLK_6_25;
8864                 tw32(TG3_CPMU_HST_ACC, val);
8865         }
8866
8867         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8868                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8869                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8870                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8871                 tw32(PCIE_PWR_MGMT_THRESH, val);
8872
8873                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8874                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8875
8876                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8877
8878                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8879                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8880         }
8881
8882         if (tg3_flag(tp, L1PLLPD_EN)) {
8883                 u32 grc_mode = tr32(GRC_MODE);
8884
8885                 /* Access the lower 1K of PL PCIE block registers. */
8886                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8887                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8888
8889                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8890                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8891                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8892
8893                 tw32(GRC_MODE, grc_mode);
8894         }
8895
8896         if (tg3_flag(tp, 57765_CLASS)) {
8897                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8898                         u32 grc_mode = tr32(GRC_MODE);
8899
8900                         /* Access the lower 1K of PL PCIE block registers. */
8901                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8902                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8903
8904                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8905                                    TG3_PCIE_PL_LO_PHYCTL5);
8906                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8907                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8908
8909                         tw32(GRC_MODE, grc_mode);
8910                 }
8911
8912                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8913                         u32 grc_mode = tr32(GRC_MODE);
8914
8915                         /* Access the lower 1K of DL PCIE block registers. */
8916                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8917                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8918
8919                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8920                                    TG3_PCIE_DL_LO_FTSMAX);
8921                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8922                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8923                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8924
8925                         tw32(GRC_MODE, grc_mode);
8926                 }
8927
8928                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8929                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8930                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8931                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8932         }
8933
8934         /* This works around an issue with Athlon chipsets on
8935          * B3 tigon3 silicon.  This bit has no effect on any
8936          * other revision.  But do not set this on PCI Express
8937          * chips and don't even touch the clocks if the CPMU is present.
8938          */
8939         if (!tg3_flag(tp, CPMU_PRESENT)) {
8940                 if (!tg3_flag(tp, PCI_EXPRESS))
8941                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8942                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8943         }
8944
8945         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8946             tg3_flag(tp, PCIX_MODE)) {
8947                 val = tr32(TG3PCI_PCISTATE);
8948                 val |= PCISTATE_RETRY_SAME_DMA;
8949                 tw32(TG3PCI_PCISTATE, val);
8950         }
8951
8952         if (tg3_flag(tp, ENABLE_APE)) {
8953                 /* Allow reads and writes to the
8954                  * APE register and memory space.
8955                  */
8956                 val = tr32(TG3PCI_PCISTATE);
8957                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8958                        PCISTATE_ALLOW_APE_SHMEM_WR |
8959                        PCISTATE_ALLOW_APE_PSPACE_WR;
8960                 tw32(TG3PCI_PCISTATE, val);
8961         }
8962
8963         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8964                 /* Enable some hw fixes.  */
8965                 val = tr32(TG3PCI_MSI_DATA);
8966                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8967                 tw32(TG3PCI_MSI_DATA, val);
8968         }
8969
8970         /* Descriptor ring init may make accesses to the
8971          * NIC SRAM area to setup the TX descriptors, so we
8972          * can only do this after the hardware has been
8973          * successfully reset.
8974          */
8975         err = tg3_init_rings(tp);
8976         if (err)
8977                 return err;
8978
8979         if (tg3_flag(tp, 57765_PLUS)) {
8980                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8981                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8982                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8983                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8984                 if (!tg3_flag(tp, 57765_CLASS) &&
8985                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8986                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8987                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8988         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8989                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8990                 /* This value is determined during the probe time DMA
8991                  * engine test, tg3_test_dma.
8992                  */
8993                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8994         }
8995
8996         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8997                           GRC_MODE_4X_NIC_SEND_RINGS |
8998                           GRC_MODE_NO_TX_PHDR_CSUM |
8999                           GRC_MODE_NO_RX_PHDR_CSUM);
9000         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9001
9002         /* Pseudo-header checksum is done by hardware logic and not
9003          * the offload processors, so make the chip do the pseudo-
9004          * header checksums on receive.  For transmit it is more
9005          * convenient to do the pseudo-header checksum in software,
9006          * as Linux already does that for us in all cases.
9007          */
9008         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9009
9010         tw32(GRC_MODE,
9011              tp->grc_mode |
9012              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
9013
9014         /* Set up the timer prescaler register.  The core clock is always
              * 66 MHz; a prescaler value of 65 (N - 1) yields 1 usec ticks.
              */
9015         val = tr32(GRC_MISC_CFG);
9016         val &= ~0xff;
9017         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9018         tw32(GRC_MISC_CFG, val);
9019
9020         /* Initialize MBUF/DESC pool. */
9021         if (tg3_flag(tp, 5750_PLUS)) {
9022                 /* Do nothing.  */
9023         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
9024                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9025                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
9026                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9027                 else
9028                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9029                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9030                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9031         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9032                 int fw_len;
9033
9034                 fw_len = tp->fw_len;
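                     /* Round the firmware length up to the next 128-byte
                      * boundary; the MBUF pool starts right after the
                      * firmware image in NIC SRAM.
                      */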
9035                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9036                 tw32(BUFMGR_MB_POOL_ADDR,
9037                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9038                 tw32(BUFMGR_MB_POOL_SIZE,
9039                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9040         }
9041
9042         if (tp->dev->mtu <= ETH_DATA_LEN) {
9043                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9044                      tp->bufmgr_config.mbuf_read_dma_low_water);
9045                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9046                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9047                 tw32(BUFMGR_MB_HIGH_WATER,
9048                      tp->bufmgr_config.mbuf_high_water);
9049         } else {
9050                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9051                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9052                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9053                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9054                 tw32(BUFMGR_MB_HIGH_WATER,
9055                      tp->bufmgr_config.mbuf_high_water_jumbo);
9056         }
9057         tw32(BUFMGR_DMA_LOW_WATER,
9058              tp->bufmgr_config.dma_low_water);
9059         tw32(BUFMGR_DMA_HIGH_WATER,
9060              tp->bufmgr_config.dma_high_water);
9061
9062         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9063         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
9064                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9065         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9066             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9067             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
9068                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9069         tw32(BUFMGR_MODE, val);
9070         for (i = 0; i < 2000; i++) {
9071                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9072                         break;
9073                 udelay(10);
9074         }
9075         if (i >= 2000) {
9076                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9077                 return -ENODEV;
9078         }
9079
9080         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
9081                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9082
9083         tg3_setup_rxbd_thresholds(tp);
9084
9085         /* Initialize TG3_BDINFO's at:
9086          *  RCVDBDI_STD_BD:     standard eth size rx ring
9087          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9088          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9089          *
9090          * like so:
9091          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9092          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9093          *                              ring attribute flags
9094          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9095          *
9096          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9097          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9098          *
9099          * The size of each ring is fixed in the firmware, but the location is
9100          * configurable.
9101          */
9102         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9103              ((u64) tpr->rx_std_mapping >> 32));
9104         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9105              ((u64) tpr->rx_std_mapping & 0xffffffff));
9106         if (!tg3_flag(tp, 5717_PLUS))
9107                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9108                      NIC_SRAM_RX_BUFFER_DESC);
9109
9110         /* Disable the mini ring */
9111         if (!tg3_flag(tp, 5705_PLUS))
9112                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9113                      BDINFO_FLAGS_DISABLED);
9114
9115         /* Program the jumbo buffer descriptor ring control
9116          * blocks on those devices that have them.
9117          */
9118         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
9119             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9121                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9122                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9123                              ((u64) tpr->rx_jmb_mapping >> 32));
9124                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9125                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9126                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9127                               BDINFO_FLAGS_MAXLEN_SHIFT;
9128                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9129                              val | BDINFO_FLAGS_USE_EXT_RECV);
9130                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9131                             tg3_flag(tp, 57765_CLASS))
9132                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9133                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9134                 } else {
9135                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9136                              BDINFO_FLAGS_DISABLED);
9137                 }
9138
9139                 if (tg3_flag(tp, 57765_PLUS)) {
9140                         val = TG3_RX_STD_RING_SIZE(tp);
9141                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9142                         val |= (TG3_RX_STD_DMA_SZ << 2);
9143                 } else
9144                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9145         } else
9146                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9147
9148         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9149
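          /* Publish the initial standard and jumbo producer indices so
           * the chip knows how many RX BDs the host has posted.
           */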
9150         tpr->rx_std_prod_idx = tp->rx_pending;
9151         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9152
9153         tpr->rx_jmb_prod_idx =
9154                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9155         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9156
9157         tg3_rings_reset(tp);
9158
9159         /* Initialize MAC address and backoff seed. */
9160         __tg3_set_mac_addr(tp, 0);
9161
9162         /* MTU + ethernet header + FCS + optional VLAN tag */
9163         tw32(MAC_RX_MTU_SIZE,
9164              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9165
9166         /* The slot time is changed by tg3_setup_phy if we
9167          * run at gigabit with half duplex.
9168          */
9169         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9170               (6 << TX_LENGTHS_IPG_SHIFT) |
9171               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9172
9173         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9174                 val |= tr32(MAC_TX_LENGTHS) &
9175                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9176                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9177
9178         tw32(MAC_TX_LENGTHS, val);
9179
9180         /* Receive rules. */
9181         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9182         tw32(RCVLPC_CONFIG, 0x0181);
9183
9184         /* Calculate RDMAC_MODE setting early, we need it to determine
9185          * the RCVLPC_STATE_ENABLE mask.
9186          */
9187         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9188                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9189                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9190                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9191                       RDMAC_MODE_LNGREAD_ENAB);
9192
9193         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9194                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9195
9196         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9197             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9198             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9199                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9200                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9201                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9202
9203         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9204             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9205                 if (tg3_flag(tp, TSO_CAPABLE) &&
9206                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
9207                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9208                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9209                            !tg3_flag(tp, IS_5788)) {
9210                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9211                 }
9212         }
9213
9214         if (tg3_flag(tp, PCI_EXPRESS))
9215                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9216
9217         if (tg3_flag(tp, HW_TSO_1) ||
9218             tg3_flag(tp, HW_TSO_2) ||
9219             tg3_flag(tp, HW_TSO_3))
9220                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
9221
9222         if (tg3_flag(tp, 57765_PLUS) ||
9223             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9225                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
9226
9227         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
9228                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
9229
9230         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9231             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9232             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
9234             tg3_flag(tp, 57765_PLUS)) {
9235                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
9236                 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
9237                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
9238                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
9239                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
9240                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
9241                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
9242                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
9243                 }
9244                 tw32(TG3_RDMA_RSRVCTRL_REG,
9245                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
9246         }
9247
9248         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9249             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9250                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9251                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
9252                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
9253                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
9254         }
9255
9256         /* Receive/send statistics. */
9257         if (tg3_flag(tp, 5750_PLUS)) {
9258                 val = tr32(RCVLPC_STATS_ENABLE);
9259                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
9260                 tw32(RCVLPC_STATS_ENABLE, val);
9261         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
9262                    tg3_flag(tp, TSO_CAPABLE)) {
9263                 val = tr32(RCVLPC_STATS_ENABLE);
9264                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
9265                 tw32(RCVLPC_STATS_ENABLE, val);
9266         } else {
9267                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
9268         }
9269         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
9270         tw32(SNDDATAI_STATSENAB, 0xffffff);
9271         tw32(SNDDATAI_STATSCTRL,
9272              (SNDDATAI_SCTRL_ENABLE |
9273               SNDDATAI_SCTRL_FASTUPD));
9274
9275         /* Set up the host coalescing engine. */
9276         tw32(HOSTCC_MODE, 0);
9277         for (i = 0; i < 2000; i++) {
9278                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
9279                         break;
9280                 udelay(10);
9281         }
9282
9283         __tg3_set_coalesce(tp, &tp->coal);
9284
9285         if (!tg3_flag(tp, 5705_PLUS)) {
9286                 /* Status/statistics block address.  See tg3_timer,
9287                  * the tg3_periodic_fetch_stats call there, and
9288                  * tg3_get_stats to see how this works for 5705/5750 chips.
9289                  */
9290                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9291                      ((u64) tp->stats_mapping >> 32));
9292                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9293                      ((u64) tp->stats_mapping & 0xffffffff));
9294                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
9295
9296                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
9297
9298                 /* Clear statistics and status block memory areas */
9299                 for (i = NIC_SRAM_STATS_BLK;
9300                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
9301                      i += sizeof(u32)) {
9302                         tg3_write_mem(tp, i, 0);
9303                         udelay(40);
9304                 }
9305         }
9306
9307         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
9308
9309         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
9310         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
9311         if (!tg3_flag(tp, 5705_PLUS))
9312                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
9313
9314         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9315                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
9316                 /* Reset to prevent intermittently losing the first rx packet. */
9317                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9318                 udelay(10);
9319         }
9320
9321         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9322                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9323                         MAC_MODE_FHDE_ENABLE;
9324         if (tg3_flag(tp, ENABLE_APE))
9325                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9326         if (!tg3_flag(tp, 5705_PLUS) &&
9327             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9328             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9329                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9330         tw32_f(MAC_MODE, tp->mac_mode |
                    MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9331         udelay(40);
9332
9333         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9334          * If TG3_FLAG_IS_NIC is zero, we should read the
9335          * register to preserve the GPIO settings for LOMs. The GPIOs,
9336          * whether used as inputs or outputs, are set by boot code after
9337          * reset.
9338          */
9339         if (!tg3_flag(tp, IS_NIC)) {
9340                 u32 gpio_mask;
9341
9342                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9343                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9344                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9345
9346                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9347                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9348                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9349
9350                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9351                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9352
9353                 tp->grc_local_ctrl &= ~gpio_mask;
9354                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9355
9356                 /* GPIO1 must be driven high for eeprom write protect */
9357                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9358                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9359                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9360         }
9361         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9362         udelay(100);
9363
9364         if (tg3_flag(tp, USING_MSIX)) {
9365                 val = tr32(MSGINT_MODE);
9366                 val |= MSGINT_MODE_ENABLE;
9367                 if (tp->irq_cnt > 1)
9368                         val |= MSGINT_MODE_MULTIVEC_EN;
9369                 if (!tg3_flag(tp, 1SHOT_MSI))
9370                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9371                 tw32(MSGINT_MODE, val);
9372         }
9373
9374         if (!tg3_flag(tp, 5705_PLUS)) {
9375                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9376                 udelay(40);
9377         }
9378
9379         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9380                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9381                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9382                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9383                WDMAC_MODE_LNGREAD_ENAB);
9384
9385         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9386             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9387                 if (tg3_flag(tp, TSO_CAPABLE) &&
9388                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9389                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9390                         /* nothing */
9391                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9392                            !tg3_flag(tp, IS_5788)) {
9393                         val |= WDMAC_MODE_RX_ACCEL;
9394                 }
9395         }
9396
9397         /* Enable host coalescing bug fix */
9398         if (tg3_flag(tp, 5755_PLUS))
9399                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9400
9401         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9402                 val |= WDMAC_MODE_BURST_ALL_DATA;
9403
9404         tw32_f(WDMAC_MODE, val);
9405         udelay(40);
9406
9407         if (tg3_flag(tp, PCIX_MODE)) {
9408                 u16 pcix_cmd;
9409
9410                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9411                                      &pcix_cmd);
9412                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9413                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9414                         pcix_cmd |= PCI_X_CMD_READ_2K;
9415                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9416                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9417                         pcix_cmd |= PCI_X_CMD_READ_2K;
9418                 }
9419                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9420                                       pcix_cmd);
9421         }
9422
9423         tw32_f(RDMAC_MODE, rdmac_mode);
9424         udelay(40);
9425
9426         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9427                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
9428                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
9429                                 break;
9430                 }
9431                 if (i < TG3_NUM_RDMA_CHANNELS) {
9432                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9433                         val |= TG3_LSO_RD_DMA_TX_LENGTH_WA;
9434                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9435                         tg3_flag_set(tp, 5719_RDMA_BUG);
9436                 }
9437         }
9438
9439         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9440         if (!tg3_flag(tp, 5705_PLUS))
9441                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9442
9443         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9444                 tw32(SNDDATAC_MODE,
9445                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9446         else
9447                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9448
9449         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9450         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9451         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9452         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9453                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9454         tw32(RCVDBDI_MODE, val);
9455         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9456         if (tg3_flag(tp, HW_TSO_1) ||
9457             tg3_flag(tp, HW_TSO_2) ||
9458             tg3_flag(tp, HW_TSO_3))
9459                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9460         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9461         if (tg3_flag(tp, ENABLE_TSS))
9462                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9463         tw32(SNDBDI_MODE, val);
9464         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9465
9466         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9467                 err = tg3_load_5701_a0_firmware_fix(tp);
9468                 if (err)
9469                         return err;
9470         }
9471
9472         if (tg3_flag(tp, TSO_CAPABLE)) {
9473                 err = tg3_load_tso_firmware(tp);
9474                 if (err)
9475                         return err;
9476         }
9477
9478         tp->tx_mode = TX_MODE_ENABLE;
9479
9480         if (tg3_flag(tp, 5755_PLUS) ||
9481             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9482                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9483
9484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9485                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9486                 tp->tx_mode &= ~val;
9487                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9488         }
9489
9490         tw32_f(MAC_TX_MODE, tp->tx_mode);
9491         udelay(100);
9492
9493         if (tg3_flag(tp, ENABLE_RSS)) {
9494                 tg3_rss_write_indir_tbl(tp);
9495
9496                 /* Set up the "secret" hash key. */
9497                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9498                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9499                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9500                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9501                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9502                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9503                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9504                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9505                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9506                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9507         }
9508
9509         tp->rx_mode = RX_MODE_ENABLE;
9510         if (tg3_flag(tp, 5755_PLUS))
9511                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9512
9513         if (tg3_flag(tp, ENABLE_RSS))
9514                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9515                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9516                                RX_MODE_RSS_IPV6_HASH_EN |
9517                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9518                                RX_MODE_RSS_IPV4_HASH_EN |
9519                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9520
9521         tw32_f(MAC_RX_MODE, tp->rx_mode);
9522         udelay(10);
9523
9524         tw32(MAC_LED_CTRL, tp->led_ctrl);
9525
9526         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9527         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9528                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9529                 udelay(10);
9530         }
9531         tw32_f(MAC_RX_MODE, tp->rx_mode);
9532         udelay(10);
9533
9534         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9535                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9536                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9537                         /* Set the drive transmission level to 1.2V, but
                             * only if the signal pre-emphasis bit is not set.
                             */
9539                         val = tr32(MAC_SERDES_CFG);
9540                         val &= 0xfffff000;
9541                         val |= 0x880;
9542                         tw32(MAC_SERDES_CFG, val);
9543                 }
9544                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9545                         tw32(MAC_SERDES_CFG, 0x616000);
9546         }
9547
9548         /* Prevent chip from dropping frames when flow control
9549          * is enabled.
9550          */
9551         if (tg3_flag(tp, 57765_CLASS))
9552                 val = 1;
9553         else
9554                 val = 2;
9555         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9556
9557         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9558             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9559                 /* Use hardware link auto-negotiation */
9560                 tg3_flag_set(tp, HW_AUTONEG);
9561         }
9562
9563         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9565                 u32 tmp;
9566
9567                 tmp = tr32(SERDES_RX_CTRL);
9568                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9569                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9570                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9571                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9572         }
9573
9574         if (!tg3_flag(tp, USE_PHYLIB)) {
9575                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9576                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9577
9578                 err = tg3_setup_phy(tp, 0);
9579                 if (err)
9580                         return err;
9581
9582                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9583                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9584                         u32 tmp;
9585
9586                         /* Clear CRC stats. */
9587                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9588                                 tg3_writephy(tp, MII_TG3_TEST1,
9589                                              tmp | MII_TG3_TEST1_CRC_EN);
9590                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9591                         }
9592                 }
9593         }
9594
9595         __tg3_set_rx_mode(tp->dev);
9596
9597         /* Initialize receive rules. */
9598         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9599         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9600         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9601         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9602
9603         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9604                 limit = 8;
9605         else
9606                 limit = 16;
9607         if (tg3_flag(tp, ENABLE_ASF))
9608                 limit -= 4;
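          /* Each case intentionally falls through: starting at 'limit'
           * clears every unused rule/value pair below it.
           */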
9609         switch (limit) {
9610         case 16:
9611                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9612         case 15:
9613                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9614         case 14:
9615                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9616         case 13:
9617                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9618         case 12:
9619                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9620         case 11:
9621                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9622         case 10:
9623                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9624         case 9:
9625                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9626         case 8:
9627                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9628         case 7:
9629                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9630         case 6:
9631                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9632         case 5:
9633                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9634         case 4:
9635                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9636         case 3:
9637                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9638         case 2:
9639         case 1:
9640
9641         default:
9642                 break;
9643         }
9644
9645         if (tg3_flag(tp, ENABLE_APE))
9646                 /* Write our heartbeat update interval to APE. */
9647                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9648                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9649
9650         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9651
9652         return 0;
9653 }
9654
9655 /* Called at device open time to get the chip ready for
9656  * packet processing.  Invoked with tp->lock held.
9657  */
9658 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9659 {
9660         tg3_switch_clocks(tp);
9661
9662         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9663
9664         return tg3_reset_hw(tp, reset_phy);
9665 }
9666
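     /* Scan the APE scratchpad for sensor-data (OCIR) records.  Records
      * with a bad signature or without the ACTIVE flag set are zeroed.
      */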
9667 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
9668 {
9669         int i;
9670
9671         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
9672                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
9673
9674                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
9675                 off += len;
9676
9677                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
9678                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
9679                         memset(ocir, 0, TG3_OCIR_LEN);
9680         }
9681 }
9682
9683 /* sysfs attributes for hwmon */
9684 static ssize_t tg3_show_temp(struct device *dev,
9685                              struct device_attribute *devattr, char *buf)
9686 {
9687         struct pci_dev *pdev = to_pci_dev(dev);
9688         struct net_device *netdev = pci_get_drvdata(pdev);
9689         struct tg3 *tp = netdev_priv(netdev);
9690         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
9691         u32 temperature;
9692
9693         spin_lock_bh(&tp->lock);
9694         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
9695                                 sizeof(temperature));
9696         spin_unlock_bh(&tp->lock);
9697         /* The APE reports the temperature in degrees Celsius, while
              * the hwmon sysfs ABI expects millidegrees.
              */
             return sprintf(buf, "%u\n", temperature * 1000);
9698 }
9699
9701 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
9702                           TG3_TEMP_SENSOR_OFFSET);
9703 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
9704                           TG3_TEMP_CAUTION_OFFSET);
9705 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
9706                           TG3_TEMP_MAX_OFFSET);
9707
9708 static struct attribute *tg3_attributes[] = {
9709         &sensor_dev_attr_temp1_input.dev_attr.attr,
9710         &sensor_dev_attr_temp1_crit.dev_attr.attr,
9711         &sensor_dev_attr_temp1_max.dev_attr.attr,
9712         NULL
9713 };
9714
9715 static const struct attribute_group tg3_group = {
9716         .attrs = tg3_attributes,
9717 };
9718
9719 static void tg3_hwmon_close(struct tg3 *tp)
9720 {
9721         if (tp->hwmon_dev) {
9722                 hwmon_device_unregister(tp->hwmon_dev);
9723                 tp->hwmon_dev = NULL;
9724                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
9725         }
9726 }
9727
9728 static void tg3_hwmon_open(struct tg3 *tp)
9729 {
9730         int i, err;
9731         u32 size = 0;
9732         struct pci_dev *pdev = tp->pdev;
9733         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
9734
9735         tg3_sd_scan_scratchpad(tp, ocirs);
9736
9737         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
9738                 if (!ocirs[i].src_data_length)
9739                         continue;
9740
9741                 size += ocirs[i].src_hdr_length;
9742                 size += ocirs[i].src_data_length;
9743         }
9744
9745         if (!size)
9746                 return;
9747
9748         /* Register hwmon sysfs hooks */
9749         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
9750         if (err) {
9751                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
9752                 return;
9753         }
9754
9755         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
9756         if (IS_ERR(tp->hwmon_dev)) {
9757                 tp->hwmon_dev = NULL;
9758                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
9759                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
9760         }
9761 }
9762
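     /* Accumulate a 32-bit hardware statistics register into a 64-bit
      * software counter kept as a high/low pair, carrying into the high
      * word when the low word wraps.
      */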
9764 #define TG3_STAT_ADD32(PSTAT, REG) \
9765 do {    u32 __val = tr32(REG); \
9766         (PSTAT)->low += __val; \
9767         if ((PSTAT)->low < __val) \
9768                 (PSTAT)->high += 1; \
9769 } while (0)
9770
9771 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9772 {
9773         struct tg3_hw_stats *sp = tp->hw_stats;
9774
9775         if (!tp->link_up)
9776                 return;
9777
9778         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9779         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9780         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9781         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9782         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9783         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9784         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9785         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9786         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9787         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9788         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9789         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9790         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9791         if (unlikely(tg3_flag(tp, 5719_RDMA_BUG) &&
9792                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
9793                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
9794                 u32 val;
9795
9796                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
9797                 val &= ~TG3_LSO_RD_DMA_TX_LENGTH_WA;
9798                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
9799                 tg3_flag_clear(tp, 5719_RDMA_BUG);
9800         }
9801
9802         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9803         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9804         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9805         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9806         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9807         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9808         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9809         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9810         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9811         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9812         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9813         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9814         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9815         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9816
9817         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9818         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9819             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9820             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9821                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9822         } else {
9823                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9824                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9825                 if (val) {
9826                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9827                         sp->rx_discards.low += val;
9828                         if (sp->rx_discards.low < val)
9829                                 sp->rx_discards.high += 1;
9830                 }
9831                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9832         }
9833         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9834 }
9835
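/* Detect a lost MSI: if a vector still has work pending but its consumer
 * indices have not moved since the previous check, assume the interrupt
 * was missed and invoke the MSI handler directly.  One grace tick is
 * allowed via chk_msi_cnt before forcing the call.
 */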
9836 static void tg3_chk_missed_msi(struct tg3 *tp)
9837 {
9838         u32 i;
9839
9840         for (i = 0; i < tp->irq_cnt; i++) {
9841                 struct tg3_napi *tnapi = &tp->napi[i];
9842
9843                 if (tg3_has_work(tnapi)) {
9844                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9845                             tnapi->last_tx_cons == tnapi->tx_cons) {
9846                                 if (tnapi->chk_msi_cnt < 1) {
9847                                         tnapi->chk_msi_cnt++;
9848                                         return;
9849                                 }
9850                                 tg3_msi(0, tnapi);
9851                         }
9852                 }
9853                 tnapi->chk_msi_cnt = 0;
9854                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9855                 tnapi->last_tx_cons = tnapi->tx_cons;
9856         }
9857 }
9858
9859 static void tg3_timer(unsigned long __opaque)
9860 {
9861         struct tg3 *tp = (struct tg3 *) __opaque;
9862
9863         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9864                 goto restart_timer;
9865
9866         spin_lock(&tp->lock);
9867
9868         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9869             tg3_flag(tp, 57765_CLASS))
9870                 tg3_chk_missed_msi(tp);
9871
9872         if (!tg3_flag(tp, TAGGED_STATUS)) {
9873                 /* All of this garbage is because, when using non-tagged
9874                  * IRQ status, the mailbox/status_block protocol the chip
9875                  * uses with the CPU is race prone.
9876                  */
9877                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9878                         tw32(GRC_LOCAL_CTRL,
9879                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9880                 } else {
9881                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9882                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9883                 }
9884
9885                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9886                         spin_unlock(&tp->lock);
9887                         tg3_reset_task_schedule(tp);
9888                         goto restart_timer;
9889                 }
9890         }
9891
9892         /* This part only runs once per second. */
9893         if (!--tp->timer_counter) {
9894                 if (tg3_flag(tp, 5705_PLUS))
9895                         tg3_periodic_fetch_stats(tp);
9896
9897                 if (tp->setlpicnt && !--tp->setlpicnt)
9898                         tg3_phy_eee_enable(tp);
9899
9900                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9901                         u32 mac_stat;
9902                         int phy_event;
9903
9904                         mac_stat = tr32(MAC_STATUS);
9905
9906                         phy_event = 0;
9907                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9908                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9909                                         phy_event = 1;
9910                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9911                                 phy_event = 1;
9912
9913                         if (phy_event)
9914                                 tg3_setup_phy(tp, 0);
9915                 } else if (tg3_flag(tp, POLL_SERDES)) {
9916                         u32 mac_stat = tr32(MAC_STATUS);
9917                         int need_setup = 0;
9918
9919                         if (tp->link_up &&
9920                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9921                                 need_setup = 1;
9922                         }
9923                         if (!tp->link_up &&
9924                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9925                                          MAC_STATUS_SIGNAL_DET))) {
9926                                 need_setup = 1;
9927                         }
9928                         if (need_setup) {
9929                                 if (!tp->serdes_counter) {
9930                                         tw32_f(MAC_MODE,
9931                                              (tp->mac_mode &
9932                                               ~MAC_MODE_PORT_MODE_MASK));
9933                                         udelay(40);
9934                                         tw32_f(MAC_MODE, tp->mac_mode);
9935                                         udelay(40);
9936                                 }
9937                                 tg3_setup_phy(tp, 0);
9938                         }
9939                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9940                            tg3_flag(tp, 5780_CLASS)) {
9941                         tg3_serdes_parallel_detect(tp);
9942                 }
9943
9944                 tp->timer_counter = tp->timer_multiplier;
9945         }
9946
9947         /* Heartbeat is only sent once every 2 seconds.
9948          *
9949          * The heartbeat is to tell the ASF firmware that the host
9950          * driver is still alive.  In the event that the OS crashes,
9951          * ASF needs to reset the hardware to free up the FIFO space
9952          * that may be filled with rx packets destined for the host.
9953          * If the FIFO is full, ASF will no longer function properly.
9954          *
9955          * Unintended resets have been reported on real time kernels
9956          * where the timer doesn't run on time.  Netpoll will also have
9957          * the same problem.
9958          *
9959          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9960          * to check the ring condition when the heartbeat is expiring
9961          * before doing the reset.  This will prevent most unintended
9962          * resets.
9963          */
9964         if (!--tp->asf_counter) {
9965                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9966                         tg3_wait_for_event_ack(tp);
9967
9968                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9969                                       FWCMD_NICDRV_ALIVE3);
9970                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9971                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9972                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9973
9974                         tg3_generate_fw_event(tp);
9975                 }
9976                 tp->asf_counter = tp->asf_multiplier;
9977         }
9978
9979         spin_unlock(&tp->lock);
9980
9981 restart_timer:
9982         tp->timer.expires = jiffies + tp->timer_offset;
9983         add_timer(&tp->timer);
9984 }
9985
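/* Timer rate arithmetic: when tagged status is usable (and the chip does
 * not need the missed-MSI check) the timer fires once per second, i.e.
 * timer_offset = HZ and timer_multiplier = 1; otherwise it fires every
 * 100 ms with timer_multiplier = 10, so the once-per-second work in
 * tg3_timer() runs on every tenth tick.  asf_multiplier likewise yields
 * one ASF heartbeat every TG3_FW_UPDATE_FREQ_SEC seconds.
 */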
9986 static void tg3_timer_init(struct tg3 *tp)
9987 {
9988         if (tg3_flag(tp, TAGGED_STATUS) &&
9989             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9990             !tg3_flag(tp, 57765_CLASS))
9991                 tp->timer_offset = HZ;
9992         else
9993                 tp->timer_offset = HZ / 10;
9994
9995         BUG_ON(tp->timer_offset > HZ);
9996
9997         tp->timer_multiplier = (HZ / tp->timer_offset);
9998         tp->asf_multiplier = (HZ / tp->timer_offset) *
9999                              TG3_FW_UPDATE_FREQ_SEC;
10000
10001         init_timer(&tp->timer);
10002         tp->timer.data = (unsigned long) tp;
10003         tp->timer.function = tg3_timer;
10004 }
10005
10006 static void tg3_timer_start(struct tg3 *tp)
10007 {
10008         tp->asf_counter   = tp->asf_multiplier;
10009         tp->timer_counter = tp->timer_multiplier;
10010
10011         tp->timer.expires = jiffies + tp->timer_offset;
10012         add_timer(&tp->timer);
10013 }
10014
10015 static void tg3_timer_stop(struct tg3 *tp)
10016 {
10017         del_timer_sync(&tp->timer);
10018 }
10019
10020 /* Restart hardware after configuration changes, self-test, etc.
10021  * Invoked with tp->lock held.
10022  */
10023 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
10024         __releases(tp->lock)
10025         __acquires(tp->lock)
10026 {
10027         int err;
10028
10029         err = tg3_init_hw(tp, reset_phy);
10030         if (err) {
10031                 netdev_err(tp->dev,
10032                            "Failed to re-initialize device, aborting\n");
10033                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10034                 tg3_full_unlock(tp);
10035                 tg3_timer_stop(tp);
10036                 tp->irq_sync = 0;
10037                 tg3_napi_enable(tp);
10038                 dev_close(tp->dev);
10039                 tg3_full_lock(tp, 0);
10040         }
10041         return err;
10042 }
10043
10044 static void tg3_reset_task(struct work_struct *work)
10045 {
10046         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10047         int err;
10048
10049         tg3_full_lock(tp, 0);
10050
10051         if (!netif_running(tp->dev)) {
10052                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10053                 tg3_full_unlock(tp);
10054                 return;
10055         }
10056
10057         tg3_full_unlock(tp);
10058
10059         tg3_phy_stop(tp);
10060
10061         tg3_netif_stop(tp);
10062
10063         tg3_full_lock(tp, 1);
10064
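        /* After a TX timeout, fall back to the flushing mailbox write
         * handlers before reinitializing (assumed rationale: reordered
         * posted mailbox writes are a suspect in the hang).
         */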
10065         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10066                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10067                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10068                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10069                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10070         }
10071
10072         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10073         err = tg3_init_hw(tp, 1);
10074         if (err)
10075                 goto out;
10076
10077         tg3_netif_start(tp);
10078
10079 out:
10080         tg3_full_unlock(tp);
10081
10082         if (!err)
10083                 tg3_phy_start(tp);
10084
10085         tg3_flag_clear(tp, RESET_TASK_PENDING);
10086 }
10087
10088 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10089 {
10090         irq_handler_t fn;
10091         unsigned long flags;
10092         char *name;
10093         struct tg3_napi *tnapi = &tp->napi[irq_num];
10094
10095         if (tp->irq_cnt == 1)
10096                 name = tp->dev->name;
10097         else {
10098                 name = &tnapi->irq_lbl[0];
10099                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10100                 name[IFNAMSIZ-1] = 0;
10101         }
10102
10103         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10104                 fn = tg3_msi;
10105                 if (tg3_flag(tp, 1SHOT_MSI))
10106                         fn = tg3_msi_1shot;
10107                 flags = 0;
10108         } else {
10109                 fn = tg3_interrupt;
10110                 if (tg3_flag(tp, TAGGED_STATUS))
10111                         fn = tg3_interrupt_tagged;
10112                 flags = IRQF_SHARED;
10113         }
10114
10115         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10116 }
10117
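/* Force an interrupt through the coalescing engine and poll the
 * interrupt mailbox to verify that it is actually delivered.
 */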
10118 static int tg3_test_interrupt(struct tg3 *tp)
10119 {
10120         struct tg3_napi *tnapi = &tp->napi[0];
10121         struct net_device *dev = tp->dev;
10122         int err, i, intr_ok = 0;
10123         u32 val;
10124
10125         if (!netif_running(dev))
10126                 return -ENODEV;
10127
10128         tg3_disable_ints(tp);
10129
10130         free_irq(tnapi->irq_vec, tnapi);
10131
10132         /*
10133          * Turn off MSI one shot mode.  Otherwise this test has no
10134          * way to observe whether the interrupt was delivered.
10135          */
10136         if (tg3_flag(tp, 57765_PLUS)) {
10137                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10138                 tw32(MSGINT_MODE, val);
10139         }
10140
10141         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10142                           IRQF_SHARED, dev->name, tnapi);
10143         if (err)
10144                 return err;
10145
10146         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10147         tg3_enable_ints(tp);
10148
10149         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10150                tnapi->coal_now);
10151
10152         for (i = 0; i < 5; i++) {
10153                 u32 int_mbox, misc_host_ctrl;
10154
10155                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10156                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10157
10158                 if ((int_mbox != 0) ||
10159                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10160                         intr_ok = 1;
10161                         break;
10162                 }
10163
10164                 if (tg3_flag(tp, 57765_PLUS) &&
10165                     tnapi->hw_status->status_tag != tnapi->last_tag)
10166                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10167
10168                 msleep(10);
10169         }
10170
10171         tg3_disable_ints(tp);
10172
10173         free_irq(tnapi->irq_vec, tnapi);
10174
10175         err = tg3_request_irq(tp, 0);
10176
10177         if (err)
10178                 return err;
10179
10180         if (intr_ok) {
10181                 /* Reenable MSI one shot mode. */
10182                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
10183                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
10184                         tw32(MSGINT_MODE, val);
10185                 }
10186                 return 0;
10187         }
10188
10189         return -EIO;
10190 }
10191
10192 /* Returns 0 if the MSI test succeeds, or if the MSI test fails and
10193  * INTx mode is successfully restored.
10194  */
10195 static int tg3_test_msi(struct tg3 *tp)
10196 {
10197         int err;
10198         u16 pci_cmd;
10199
10200         if (!tg3_flag(tp, USING_MSI))
10201                 return 0;
10202
10203         /* Turn off SERR reporting in case MSI terminates with Master
10204          * Abort.
10205          */
10206         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
10207         pci_write_config_word(tp->pdev, PCI_COMMAND,
10208                               pci_cmd & ~PCI_COMMAND_SERR);
10209
10210         err = tg3_test_interrupt(tp);
10211
10212         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
10213
10214         if (!err)
10215                 return 0;
10216
10217         /* other failures */
10218         if (err != -EIO)
10219                 return err;
10220
10221         /* MSI test failed, go back to INTx mode */
10222         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
10223                     "to INTx mode. Please report this failure to the PCI "
10224                     "maintainer and include system chipset information\n");
10225
10226         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10227
10228         pci_disable_msi(tp->pdev);
10229
10230         tg3_flag_clear(tp, USING_MSI);
10231         tp->napi[0].irq_vec = tp->pdev->irq;
10232
10233         err = tg3_request_irq(tp, 0);
10234         if (err)
10235                 return err;
10236
10237         /* Need to reset the chip because the MSI cycle may have terminated
10238          * with Master Abort.
10239          */
10240         tg3_full_lock(tp, 1);
10241
10242         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10243         err = tg3_init_hw(tp, 1);
10244
10245         tg3_full_unlock(tp);
10246
10247         if (err)
10248                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
10249
10250         return err;
10251 }
10252
10253 static int tg3_request_firmware(struct tg3 *tp)
10254 {
10255         const __be32 *fw_data;
10256
10257         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10258                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
10259                            tp->fw_needed);
10260                 return -ENOENT;
10261         }
10262
10263         fw_data = (void *)tp->fw->data;
10264
10265         /* Firmware blob starts with version numbers, followed by
10266          * start address and _full_ length including BSS sections
10267          * (which must be longer than the actual data, of course).
10268          */
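        /* Header layout, per the comment above: three big-endian words of
         * fw_data[] hold the version, the start address, and the full
         * length, hence the 12-byte header subtracted from tp->fw->size
         * in the check below.
         */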
10269
10270         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
10271         if (tp->fw_len < (tp->fw->size - 12)) {
10272                 netdev_err(tp->dev, "bogus length %u in \"%s\"\n",
10273                            tp->fw_len, tp->fw_needed);
10274                 release_firmware(tp->fw);
10275                 tp->fw = NULL;
10276                 return -EINVAL;
10277         }
10278
10279         /* We no longer need firmware; we have it. */
10280         tp->fw_needed = NULL;
10281         return 0;
10282 }
10283
10284 static u32 tg3_irq_count(struct tg3 *tp)
10285 {
10286         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
10287
10288         if (irq_cnt > 1) {
10289                 /* We want as many rx rings enabled as there are CPUs.
10290                  * In multiqueue MSI-X mode, the first MSI-X vector
10291                  * only deals with link interrupts, etc., so we add
10292                  * one to the number of vectors we are requesting.
10293                  */
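                /* Example: with four CPUs and four rx rings wanted, this
                 * requests min(4 + 1, tp->irq_max) vectors, usually 5.
                 */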
10294                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
10295         }
10296
10297         return irq_cnt;
10298 }
10299
10300 static bool tg3_enable_msix(struct tg3 *tp)
10301 {
10302         int i, rc;
10303         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
10304
10305         tp->txq_cnt = tp->txq_req;
10306         tp->rxq_cnt = tp->rxq_req;
10307         if (!tp->rxq_cnt)
10308                 tp->rxq_cnt = netif_get_num_default_rss_queues();
10309         if (tp->rxq_cnt > tp->rxq_max)
10310                 tp->rxq_cnt = tp->rxq_max;
10311
10312         /* Disable multiple TX rings by default.  Simple round-robin hardware
10313          * scheduling of the TX rings can cause starvation of rings with
10314          * small packets when other rings have TSO or jumbo packets.
10315          */
10316         if (!tp->txq_req)
10317                 tp->txq_cnt = 1;
10318
10319         tp->irq_cnt = tg3_irq_count(tp);
10320
10321         for (i = 0; i < tp->irq_max; i++) {
10322                 msix_ent[i].entry  = i;
10323                 msix_ent[i].vector = 0;
10324         }
10325
10326         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
10327         if (rc < 0) {
10328                 return false;
10329         } else if (rc != 0) {
10330                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
10331                         return false;
10332                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
10333                               tp->irq_cnt, rc);
10334                 tp->irq_cnt = rc;
10335                 tp->rxq_cnt = max(rc - 1, 1);
10336                 if (tp->txq_cnt)
10337                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
10338         }
10339
10340         for (i = 0; i < tp->irq_max; i++)
10341                 tp->napi[i].irq_vec = msix_ent[i].vector;
10342
10343         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
10344                 pci_disable_msix(tp->pdev);
10345                 return false;
10346         }
10347
10348         if (tp->irq_cnt == 1)
10349                 return true;
10350
10351         tg3_flag_set(tp, ENABLE_RSS);
10352
10353         if (tp->txq_cnt > 1)
10354                 tg3_flag_set(tp, ENABLE_TSS);
10355
10356         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
10357
10358         return true;
10359 }
10360
10361 static void tg3_ints_init(struct tg3 *tp)
10362 {
10363         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
10364             !tg3_flag(tp, TAGGED_STATUS)) {
10365                 /* All MSI-supporting chips should support tagged
10366                  * status; warn and fall back to INTx if one does not.
10367                  */
10368                 netdev_warn(tp->dev,
10369                             "MSI without TAGGED_STATUS? Not using MSI\n");
10370                 goto defcfg;
10371         }
10372
10373         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
10374                 tg3_flag_set(tp, USING_MSIX);
10375         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
10376                 tg3_flag_set(tp, USING_MSI);
10377
10378         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10379                 u32 msi_mode = tr32(MSGINT_MODE);
10380                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
10381                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
10382                 if (!tg3_flag(tp, 1SHOT_MSI))
10383                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
10384                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
10385         }
10386 defcfg:
10387         if (!tg3_flag(tp, USING_MSIX)) {
10388                 tp->irq_cnt = 1;
10389                 tp->napi[0].irq_vec = tp->pdev->irq;
10390         }
10391
10392         if (tp->irq_cnt == 1) {
10393                 tp->txq_cnt = 1;
10394                 tp->rxq_cnt = 1;
10395                 netif_set_real_num_tx_queues(tp->dev, 1);
10396                 netif_set_real_num_rx_queues(tp->dev, 1);
10397         }
10398 }
10399
10400 static void tg3_ints_fini(struct tg3 *tp)
10401 {
10402         if (tg3_flag(tp, USING_MSIX))
10403                 pci_disable_msix(tp->pdev);
10404         else if (tg3_flag(tp, USING_MSI))
10405                 pci_disable_msi(tp->pdev);
10406         tg3_flag_clear(tp, USING_MSI);
10407         tg3_flag_clear(tp, USING_MSIX);
10408         tg3_flag_clear(tp, ENABLE_RSS);
10409         tg3_flag_clear(tp, ENABLE_TSS);
10410 }
10411
10412 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
10413                      bool init)
10414 {
10415         struct net_device *dev = tp->dev;
10416         int i, err;
10417
10418         /*
10419          * Setup interrupts first so we know how
10420          * many NAPI resources to allocate
10421          */
10422         tg3_ints_init(tp);
10423
10424         tg3_rss_check_indir_tbl(tp);
10425
10426         /* The placement of this call is tied
10427          * to the setup and use of Host TX descriptors.
10428          */
10429         err = tg3_alloc_consistent(tp);
10430         if (err)
10431                 goto err_out1;
10432
10433         tg3_napi_init(tp);
10434
10435         tg3_napi_enable(tp);
10436
10437         for (i = 0; i < tp->irq_cnt; i++) {
10438                 struct tg3_napi *tnapi = &tp->napi[i];
10439                 err = tg3_request_irq(tp, i);
10440                 if (err) {
10441                         for (i--; i >= 0; i--) {
10442                                 tnapi = &tp->napi[i];
10443                                 free_irq(tnapi->irq_vec, tnapi);
10444                         }
10445                         goto err_out2;
10446                 }
10447         }
10448
10449         tg3_full_lock(tp, 0);
10450
10451         err = tg3_init_hw(tp, reset_phy);
10452         if (err) {
10453                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10454                 tg3_free_rings(tp);
10455         }
10456
10457         tg3_full_unlock(tp);
10458
10459         if (err)
10460                 goto err_out3;
10461
10462         if (test_irq && tg3_flag(tp, USING_MSI)) {
10463                 err = tg3_test_msi(tp);
10464
10465                 if (err) {
10466                         tg3_full_lock(tp, 0);
10467                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10468                         tg3_free_rings(tp);
10469                         tg3_full_unlock(tp);
10470
10471                         goto err_out2;
10472                 }
10473
10474                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10475                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10476
10477                         tw32(PCIE_TRANSACTION_CFG,
10478                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10479                 }
10480         }
10481
10482         tg3_phy_start(tp);
10483
10484         tg3_hwmon_open(tp);
10485
10486         tg3_full_lock(tp, 0);
10487
10488         tg3_timer_start(tp);
10489         tg3_flag_set(tp, INIT_COMPLETE);
10490         tg3_enable_ints(tp);
10491
10492         if (init)
10493                 tg3_ptp_init(tp);
10494         else
10495                 tg3_ptp_resume(tp);
10496
10497
10498         tg3_full_unlock(tp);
10499
10500         netif_tx_start_all_queues(dev);
10501
10502         /*
10503          * Reset the loopback feature if it was turned on while the device was
10504          * down; make sure that it's configured properly now.
10505          */
10506         if (dev->features & NETIF_F_LOOPBACK)
10507                 tg3_set_loopback(dev, dev->features);
10508
10509         return 0;
10510
10511 err_out3:
10512         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10513                 struct tg3_napi *tnapi = &tp->napi[i];
10514                 free_irq(tnapi->irq_vec, tnapi);
10515         }
10516
10517 err_out2:
10518         tg3_napi_disable(tp);
10519         tg3_napi_fini(tp);
10520         tg3_free_consistent(tp);
10521
10522 err_out1:
10523         tg3_ints_fini(tp);
10524
10525         return err;
10526 }
10527
10528 static void tg3_stop(struct tg3 *tp)
10529 {
10530         int i;
10531
10532         tg3_reset_task_cancel(tp);
10533         tg3_netif_stop(tp);
10534
10535         tg3_timer_stop(tp);
10536
10537         tg3_hwmon_close(tp);
10538
10539         tg3_phy_stop(tp);
10540
10541         tg3_full_lock(tp, 1);
10542
10543         tg3_disable_ints(tp);
10544
10545         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10546         tg3_free_rings(tp);
10547         tg3_flag_clear(tp, INIT_COMPLETE);
10548
10549         tg3_full_unlock(tp);
10550
10551         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10552                 struct tg3_napi *tnapi = &tp->napi[i];
10553                 free_irq(tnapi->irq_vec, tnapi);
10554         }
10555
10556         tg3_ints_fini(tp);
10557
10558         tg3_napi_fini(tp);
10559
10560         tg3_free_consistent(tp);
10561 }
10562
10563 static int tg3_open(struct net_device *dev)
10564 {
10565         struct tg3 *tp = netdev_priv(dev);
10566         int err;
10567
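        /* On 5701 A0 the firmware is mandatory, so a load failure is
         * fatal; on other chips it only costs TSO capability.
         */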
10568         if (tp->fw_needed) {
10569                 err = tg3_request_firmware(tp);
10570                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
10571                         if (err)
10572                                 return err;
10573                 } else if (err) {
10574                         netdev_warn(tp->dev, "TSO capability disabled\n");
10575                         tg3_flag_clear(tp, TSO_CAPABLE);
10576                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
10577                         netdev_notice(tp->dev, "TSO capability restored\n");
10578                         tg3_flag_set(tp, TSO_CAPABLE);
10579                 }
10580         }
10581
10582         tg3_carrier_off(tp);
10583
10584         err = tg3_power_up(tp);
10585         if (err)
10586                 return err;
10587
10588         tg3_full_lock(tp, 0);
10589
10590         tg3_disable_ints(tp);
10591         tg3_flag_clear(tp, INIT_COMPLETE);
10592
10593         tg3_full_unlock(tp);
10594
10595         err = tg3_start(tp, true, true, true);
10596         if (err) {
10597                 tg3_frob_aux_power(tp, false);
10598                 pci_set_power_state(tp->pdev, PCI_D3hot);
10599         }
10600
10601         return err;
10602 }
10603
10604 static int tg3_close(struct net_device *dev)
10605 {
10606         struct tg3 *tp = netdev_priv(dev);
10607
10608         tg3_ptp_fini(tp);
10609
10610         tg3_stop(tp);
10611
10612         /* Clear stats across close / open calls */
10613         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10614         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10615
10616         tg3_power_down(tp);
10617
10618         tg3_carrier_off(tp);
10619
10620         return 0;
10621 }
10622
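/* Statistics counters are kept as high/low 32-bit word pairs
 * (tg3_stat64_t); fold a pair into a single u64.
 */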
10623 static inline u64 get_stat64(tg3_stat64_t *val)
10624 {
10625         return ((u64)val->high << 32) | ((u64)val->low);
10626 }
10627
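/* On 5700 / 5701 copper devices the CRC error count is taken from the
 * PHY's MII_TG3_RXR_COUNTERS register (gated by MII_TG3_TEST1_CRC_EN)
 * rather than the MAC's rx_fcs_errors statistic, presumably because the
 * MAC counter is unreliable on those chips.
 */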
10628 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10629 {
10630         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10631
10632         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10633             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10634              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10635                 u32 val;
10636
10637                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10638                         tg3_writephy(tp, MII_TG3_TEST1,
10639                                      val | MII_TG3_TEST1_CRC_EN);
10640                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10641                 } else
10642                         val = 0;
10643
10644                 tp->phy_crc_errors += val;
10645
10646                 return tp->phy_crc_errors;
10647         }
10648
10649         return get_stat64(&hw_stats->rx_fcs_errors);
10650 }
10651
10652 #define ESTAT_ADD(member) \
10653         estats->member =        old_estats->member + \
10654                                 get_stat64(&hw_stats->member)
10655
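/* Each reported ethtool counter is the previously saved total
 * (tp->estats_prev) plus the live hardware counter, so the numbers keep
 * growing across chip resets.
 */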
10656 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10657 {
10658         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10659         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10660
10661         ESTAT_ADD(rx_octets);
10662         ESTAT_ADD(rx_fragments);
10663         ESTAT_ADD(rx_ucast_packets);
10664         ESTAT_ADD(rx_mcast_packets);
10665         ESTAT_ADD(rx_bcast_packets);
10666         ESTAT_ADD(rx_fcs_errors);
10667         ESTAT_ADD(rx_align_errors);
10668         ESTAT_ADD(rx_xon_pause_rcvd);
10669         ESTAT_ADD(rx_xoff_pause_rcvd);
10670         ESTAT_ADD(rx_mac_ctrl_rcvd);
10671         ESTAT_ADD(rx_xoff_entered);
10672         ESTAT_ADD(rx_frame_too_long_errors);
10673         ESTAT_ADD(rx_jabbers);
10674         ESTAT_ADD(rx_undersize_packets);
10675         ESTAT_ADD(rx_in_length_errors);
10676         ESTAT_ADD(rx_out_length_errors);
10677         ESTAT_ADD(rx_64_or_less_octet_packets);
10678         ESTAT_ADD(rx_65_to_127_octet_packets);
10679         ESTAT_ADD(rx_128_to_255_octet_packets);
10680         ESTAT_ADD(rx_256_to_511_octet_packets);
10681         ESTAT_ADD(rx_512_to_1023_octet_packets);
10682         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10683         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10684         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10685         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10686         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10687
10688         ESTAT_ADD(tx_octets);
10689         ESTAT_ADD(tx_collisions);
10690         ESTAT_ADD(tx_xon_sent);
10691         ESTAT_ADD(tx_xoff_sent);
10692         ESTAT_ADD(tx_flow_control);
10693         ESTAT_ADD(tx_mac_errors);
10694         ESTAT_ADD(tx_single_collisions);
10695         ESTAT_ADD(tx_mult_collisions);
10696         ESTAT_ADD(tx_deferred);
10697         ESTAT_ADD(tx_excessive_collisions);
10698         ESTAT_ADD(tx_late_collisions);
10699         ESTAT_ADD(tx_collide_2times);
10700         ESTAT_ADD(tx_collide_3times);
10701         ESTAT_ADD(tx_collide_4times);
10702         ESTAT_ADD(tx_collide_5times);
10703         ESTAT_ADD(tx_collide_6times);
10704         ESTAT_ADD(tx_collide_7times);
10705         ESTAT_ADD(tx_collide_8times);
10706         ESTAT_ADD(tx_collide_9times);
10707         ESTAT_ADD(tx_collide_10times);
10708         ESTAT_ADD(tx_collide_11times);
10709         ESTAT_ADD(tx_collide_12times);
10710         ESTAT_ADD(tx_collide_13times);
10711         ESTAT_ADD(tx_collide_14times);
10712         ESTAT_ADD(tx_collide_15times);
10713         ESTAT_ADD(tx_ucast_packets);
10714         ESTAT_ADD(tx_mcast_packets);
10715         ESTAT_ADD(tx_bcast_packets);
10716         ESTAT_ADD(tx_carrier_sense_errors);
10717         ESTAT_ADD(tx_discards);
10718         ESTAT_ADD(tx_errors);
10719
10720         ESTAT_ADD(dma_writeq_full);
10721         ESTAT_ADD(dma_write_prioq_full);
10722         ESTAT_ADD(rxbds_empty);
10723         ESTAT_ADD(rx_discards);
10724         ESTAT_ADD(rx_errors);
10725         ESTAT_ADD(rx_threshold_hit);
10726
10727         ESTAT_ADD(dma_readq_full);
10728         ESTAT_ADD(dma_read_prioq_full);
10729         ESTAT_ADD(tx_comp_queue_full);
10730
10731         ESTAT_ADD(ring_set_send_prod_index);
10732         ESTAT_ADD(ring_status_update);
10733         ESTAT_ADD(nic_irqs);
10734         ESTAT_ADD(nic_avoided_irqs);
10735         ESTAT_ADD(nic_tx_threshold_hit);
10736
10737         ESTAT_ADD(mbuf_lwm_thresh_hit);
10738 }
10739
10740 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10741 {
10742         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10743         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10744
10745         stats->rx_packets = old_stats->rx_packets +
10746                 get_stat64(&hw_stats->rx_ucast_packets) +
10747                 get_stat64(&hw_stats->rx_mcast_packets) +
10748                 get_stat64(&hw_stats->rx_bcast_packets);
10749
10750         stats->tx_packets = old_stats->tx_packets +
10751                 get_stat64(&hw_stats->tx_ucast_packets) +
10752                 get_stat64(&hw_stats->tx_mcast_packets) +
10753                 get_stat64(&hw_stats->tx_bcast_packets);
10754
10755         stats->rx_bytes = old_stats->rx_bytes +
10756                 get_stat64(&hw_stats->rx_octets);
10757         stats->tx_bytes = old_stats->tx_bytes +
10758                 get_stat64(&hw_stats->tx_octets);
10759
10760         stats->rx_errors = old_stats->rx_errors +
10761                 get_stat64(&hw_stats->rx_errors);
10762         stats->tx_errors = old_stats->tx_errors +
10763                 get_stat64(&hw_stats->tx_errors) +
10764                 get_stat64(&hw_stats->tx_mac_errors) +
10765                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10766                 get_stat64(&hw_stats->tx_discards);
10767
10768         stats->multicast = old_stats->multicast +
10769                 get_stat64(&hw_stats->rx_mcast_packets);
10770         stats->collisions = old_stats->collisions +
10771                 get_stat64(&hw_stats->tx_collisions);
10772
10773         stats->rx_length_errors = old_stats->rx_length_errors +
10774                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10775                 get_stat64(&hw_stats->rx_undersize_packets);
10776
10777         stats->rx_over_errors = old_stats->rx_over_errors +
10778                 get_stat64(&hw_stats->rxbds_empty);
10779         stats->rx_frame_errors = old_stats->rx_frame_errors +
10780                 get_stat64(&hw_stats->rx_align_errors);
10781         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10782                 get_stat64(&hw_stats->tx_discards);
10783         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10784                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10785
10786         stats->rx_crc_errors = old_stats->rx_crc_errors +
10787                 tg3_calc_crc_errors(tp);
10788
10789         stats->rx_missed_errors = old_stats->rx_missed_errors +
10790                 get_stat64(&hw_stats->rx_discards);
10791
10792         stats->rx_dropped = tp->rx_dropped;
10793         stats->tx_dropped = tp->tx_dropped;
10794 }
10795
10796 static int tg3_get_regs_len(struct net_device *dev)
10797 {
10798         return TG3_REG_BLK_SIZE;
10799 }
10800
10801 static void tg3_get_regs(struct net_device *dev,
10802                 struct ethtool_regs *regs, void *_p)
10803 {
10804         struct tg3 *tp = netdev_priv(dev);
10805
10806         regs->version = 0;
10807
10808         memset(_p, 0, TG3_REG_BLK_SIZE);
10809
10810         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10811                 return;
10812
10813         tg3_full_lock(tp, 0);
10814
10815         tg3_dump_legacy_regs(tp, (u32 *)_p);
10816
10817         tg3_full_unlock(tp);
10818 }
10819
10820 static int tg3_get_eeprom_len(struct net_device *dev)
10821 {
10822         struct tg3 *tp = netdev_priv(dev);
10823
10824         return tp->nvram_size;
10825 }
10826
10827 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10828 {
10829         struct tg3 *tp = netdev_priv(dev);
10830         int ret;
10831         u8  *pd;
10832         u32 i, offset, len, b_offset, b_count;
10833         __be32 val;
10834
10835         if (tg3_flag(tp, NO_NVRAM))
10836                 return -EINVAL;
10837
10838         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10839                 return -EAGAIN;
10840
10841         offset = eeprom->offset;
10842         len = eeprom->len;
10843         eeprom->len = 0;
10844
10845         eeprom->magic = TG3_EEPROM_MAGIC;
10846
10847         if (offset & 3) {
10848                 /* adjustments to start on required 4 byte boundary */
10849                 b_offset = offset & 3;
10850                 b_count = 4 - b_offset;
10851                 if (b_count > len) {
10852                         /* e.g. offset=1 len=2: b_count of 3 exceeds len, clamp to 2 */
10853                         b_count = len;
10854                 }
10855                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10856                 if (ret)
10857                         return ret;
10858                 memcpy(data, ((char *)&val) + b_offset, b_count);
10859                 len -= b_count;
10860                 offset += b_count;
10861                 eeprom->len += b_count;
10862         }
10863
10864         /* read bytes up to the last 4 byte boundary */
10865         pd = &data[eeprom->len];
10866         for (i = 0; i < (len - (len & 3)); i += 4) {
10867                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10868                 if (ret) {
10869                         eeprom->len += i;
10870                         return ret;
10871                 }
10872                 memcpy(pd + i, &val, 4);
10873         }
10874         eeprom->len += i;
10875
10876         if (len & 3) {
10877                 /* read last bytes not ending on 4 byte boundary */
10878                 pd = &data[eeprom->len];
10879                 b_count = len & 3;
10880                 b_offset = offset + len - b_count;
10881                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10882                 if (ret)
10883                         return ret;
10884                 memcpy(pd, &val, b_count);
10885                 eeprom->len += b_count;
10886         }
10887         return 0;
10888 }
10889
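/* NVRAM writes must cover whole, 4-byte-aligned words.  For an unaligned
 * offset or odd length, the bordering words are read first, the caller's
 * bytes are merged into a bounce buffer, and the padded range is written
 * back in one tg3_nvram_write_block() call.
 */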
10890 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10891 {
10892         struct tg3 *tp = netdev_priv(dev);
10893         int ret;
10894         u32 offset, len, b_offset, odd_len;
10895         u8 *buf;
10896         __be32 start, end;
10897
10898         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10899                 return -EAGAIN;
10900
10901         if (tg3_flag(tp, NO_NVRAM) ||
10902             eeprom->magic != TG3_EEPROM_MAGIC)
10903                 return -EINVAL;
10904
10905         offset = eeprom->offset;
10906         len = eeprom->len;
10907
10908         if ((b_offset = (offset & 3))) {
10909                 /* adjustments to start on required 4 byte boundary */
10910                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10911                 if (ret)
10912                         return ret;
10913                 len += b_offset;
10914                 offset &= ~3;
10915                 if (len < 4)
10916                         len = 4;
10917         }
10918
10919         odd_len = 0;
10920         if (len & 3) {
10921                 /* adjustments to end on required 4 byte boundary */
10922                 odd_len = 1;
10923                 len = (len + 3) & ~3;
10924                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10925                 if (ret)
10926                         return ret;
10927         }
10928
10929         buf = data;
10930         if (b_offset || odd_len) {
10931                 buf = kmalloc(len, GFP_KERNEL);
10932                 if (!buf)
10933                         return -ENOMEM;
10934                 if (b_offset)
10935                         memcpy(buf, &start, 4);
10936                 if (odd_len)
10937                         memcpy(buf+len-4, &end, 4);
10938                 memcpy(buf + b_offset, data, eeprom->len);
10939         }
10940
10941         ret = tg3_nvram_write_block(tp, offset, len, buf);
10942
10943         if (buf != data)
10944                 kfree(buf);
10945
10946         return ret;
10947 }
10948
10949 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10950 {
10951         struct tg3 *tp = netdev_priv(dev);
10952
10953         if (tg3_flag(tp, USE_PHYLIB)) {
10954                 struct phy_device *phydev;
10955                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10956                         return -EAGAIN;
10957                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10958                 return phy_ethtool_gset(phydev, cmd);
10959         }
10960
10961         cmd->supported = (SUPPORTED_Autoneg);
10962
10963         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10964                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10965                                    SUPPORTED_1000baseT_Full);
10966
10967         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10968                 cmd->supported |= (SUPPORTED_100baseT_Half |
10969                                   SUPPORTED_100baseT_Full |
10970                                   SUPPORTED_10baseT_Half |
10971                                   SUPPORTED_10baseT_Full |
10972                                   SUPPORTED_TP);
10973                 cmd->port = PORT_TP;
10974         } else {
10975                 cmd->supported |= SUPPORTED_FIBRE;
10976                 cmd->port = PORT_FIBRE;
10977         }
10978
10979         cmd->advertising = tp->link_config.advertising;
10980         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10981                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10982                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10983                                 cmd->advertising |= ADVERTISED_Pause;
10984                         } else {
10985                                 cmd->advertising |= ADVERTISED_Pause |
10986                                                     ADVERTISED_Asym_Pause;
10987                         }
10988                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10989                         cmd->advertising |= ADVERTISED_Asym_Pause;
10990                 }
10991         }
10992         if (netif_running(dev) && tp->link_up) {
10993                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10994                 cmd->duplex = tp->link_config.active_duplex;
10995                 cmd->lp_advertising = tp->link_config.rmt_adv;
10996                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10997                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10998                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10999                         else
11000                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11001                 }
11002         } else {
11003                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11004                 cmd->duplex = DUPLEX_UNKNOWN;
11005                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11006         }
11007         cmd->phy_address = tp->phy_addr;
11008         cmd->transceiver = XCVR_INTERNAL;
11009         cmd->autoneg = tp->link_config.autoneg;
11010         cmd->maxtxpkt = 0;
11011         cmd->maxrxpkt = 0;
11012         return 0;
11013 }
11014
11015 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11016 {
11017         struct tg3 *tp = netdev_priv(dev);
11018         u32 speed = ethtool_cmd_speed(cmd);
11019
11020         if (tg3_flag(tp, USE_PHYLIB)) {
11021                 struct phy_device *phydev;
11022                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11023                         return -EAGAIN;
11024                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11025                 return phy_ethtool_sset(phydev, cmd);
11026         }
11027
11028         if (cmd->autoneg != AUTONEG_ENABLE &&
11029             cmd->autoneg != AUTONEG_DISABLE)
11030                 return -EINVAL;
11031
11032         if (cmd->autoneg == AUTONEG_DISABLE &&
11033             cmd->duplex != DUPLEX_FULL &&
11034             cmd->duplex != DUPLEX_HALF)
11035                 return -EINVAL;
11036
11037         if (cmd->autoneg == AUTONEG_ENABLE) {
11038                 u32 mask = ADVERTISED_Autoneg |
11039                            ADVERTISED_Pause |
11040                            ADVERTISED_Asym_Pause;
11041
11042                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11043                         mask |= ADVERTISED_1000baseT_Half |
11044                                 ADVERTISED_1000baseT_Full;
11045
11046                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11047                         mask |= ADVERTISED_100baseT_Half |
11048                                 ADVERTISED_100baseT_Full |
11049                                 ADVERTISED_10baseT_Half |
11050                                 ADVERTISED_10baseT_Full |
11051                                 ADVERTISED_TP;
11052                 else
11053                         mask |= ADVERTISED_FIBRE;
11054
11055                 if (cmd->advertising & ~mask)
11056                         return -EINVAL;
11057
11058                 mask &= (ADVERTISED_1000baseT_Half |
11059                          ADVERTISED_1000baseT_Full |
11060                          ADVERTISED_100baseT_Half |
11061                          ADVERTISED_100baseT_Full |
11062                          ADVERTISED_10baseT_Half |
11063                          ADVERTISED_10baseT_Full);
11064
11065                 cmd->advertising &= mask;
11066         } else {
11067                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11068                         if (speed != SPEED_1000)
11069                                 return -EINVAL;
11070
11071                         if (cmd->duplex != DUPLEX_FULL)
11072                                 return -EINVAL;
11073                 } else {
11074                         if (speed != SPEED_100 &&
11075                             speed != SPEED_10)
11076                                 return -EINVAL;
11077                 }
11078         }
11079
11080         tg3_full_lock(tp, 0);
11081
11082         tp->link_config.autoneg = cmd->autoneg;
11083         if (cmd->autoneg == AUTONEG_ENABLE) {
11084                 tp->link_config.advertising = (cmd->advertising |
11085                                               ADVERTISED_Autoneg);
11086                 tp->link_config.speed = SPEED_UNKNOWN;
11087                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11088         } else {
11089                 tp->link_config.advertising = 0;
11090                 tp->link_config.speed = speed;
11091                 tp->link_config.duplex = cmd->duplex;
11092         }
11093
11094         if (netif_running(dev))
11095                 tg3_setup_phy(tp, 1);
11096
11097         tg3_full_unlock(tp);
11098
11099         return 0;
11100 }
11101
11102 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11103 {
11104         struct tg3 *tp = netdev_priv(dev);
11105
11106         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11107         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11108         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11109         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11110 }
11111
11112 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11113 {
11114         struct tg3 *tp = netdev_priv(dev);
11115
11116         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11117                 wol->supported = WAKE_MAGIC;
11118         else
11119                 wol->supported = 0;
11120         wol->wolopts = 0;
11121         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11122                 wol->wolopts = WAKE_MAGIC;
11123         memset(&wol->sopass, 0, sizeof(wol->sopass));
11124 }
11125
11126 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11127 {
11128         struct tg3 *tp = netdev_priv(dev);
11129         struct device *dp = &tp->pdev->dev;
11130
11131         if (wol->wolopts & ~WAKE_MAGIC)
11132                 return -EINVAL;
11133         if ((wol->wolopts & WAKE_MAGIC) &&
11134             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11135                 return -EINVAL;
11136
11137         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11138
11139         spin_lock_bh(&tp->lock);
11140         if (device_may_wakeup(dp))
11141                 tg3_flag_set(tp, WOL_ENABLE);
11142         else
11143                 tg3_flag_clear(tp, WOL_ENABLE);
11144         spin_unlock_bh(&tp->lock);
11145
11146         return 0;
11147 }
11148
11149 static u32 tg3_get_msglevel(struct net_device *dev)
11150 {
11151         struct tg3 *tp = netdev_priv(dev);
11152         return tp->msg_enable;
11153 }
11154
11155 static void tg3_set_msglevel(struct net_device *dev, u32 value)
11156 {
11157         struct tg3 *tp = netdev_priv(dev);
11158         tp->msg_enable = value;
11159 }
11160
11161 static int tg3_nway_reset(struct net_device *dev)
11162 {
11163         struct tg3 *tp = netdev_priv(dev);
11164         int r;
11165
11166         if (!netif_running(dev))
11167                 return -EAGAIN;
11168
11169         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11170                 return -EINVAL;
11171
11172         if (tg3_flag(tp, USE_PHYLIB)) {
11173                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11174                         return -EAGAIN;
11175                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
11176         } else {
11177                 u32 bmcr;
11178
11179                 spin_lock_bh(&tp->lock);
11180                 r = -EINVAL;
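                /* The back-to-back BMCR reads appear deliberate: the
                 * first result is discarded, presumably to flush a stale
                 * MII value before BMCR_ANENABLE is tested.
                 */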
11181                 tg3_readphy(tp, MII_BMCR, &bmcr);
11182                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
11183                     ((bmcr & BMCR_ANENABLE) ||
11184                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
11185                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
11186                                                    BMCR_ANENABLE);
11187                         r = 0;
11188                 }
11189                 spin_unlock_bh(&tp->lock);
11190         }
11191
11192         return r;
11193 }
11194
11195 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11196 {
11197         struct tg3 *tp = netdev_priv(dev);
11198
11199         ering->rx_max_pending = tp->rx_std_ring_mask;
11200         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11201                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
11202         else
11203                 ering->rx_jumbo_max_pending = 0;
11204
11205         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
11206
11207         ering->rx_pending = tp->rx_pending;
11208         if (tg3_flag(tp, JUMBO_RING_ENABLE))
11209                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
11210         else
11211                 ering->rx_jumbo_pending = 0;
11212
11213         ering->tx_pending = tp->napi[0].tx_pending;
11214 }
11215
11216 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
11217 {
11218         struct tg3 *tp = netdev_priv(dev);
11219         int i, irq_sync = 0, err = 0;
11220
11221         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
11222             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
11223             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
11224             (ering->tx_pending <= MAX_SKB_FRAGS) ||
11225             (tg3_flag(tp, TSO_BUG) &&
11226              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
11227                 return -EINVAL;
11228
11229         if (netif_running(dev)) {
11230                 tg3_phy_stop(tp);
11231                 tg3_netif_stop(tp);
11232                 irq_sync = 1;
11233         }
11234
11235         tg3_full_lock(tp, irq_sync);
11236
11237         tp->rx_pending = ering->rx_pending;
11238
11239         if (tg3_flag(tp, MAX_RXPEND_64) &&
11240             tp->rx_pending > 63)
11241                 tp->rx_pending = 63;
11242         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
11243
11244         for (i = 0; i < tp->irq_max; i++)
11245                 tp->napi[i].tx_pending = ering->tx_pending;
11246
11247         if (netif_running(dev)) {
11248                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11249                 err = tg3_restart_hw(tp, 1);
11250                 if (!err)
11251                         tg3_netif_start(tp);
11252         }
11253
11254         tg3_full_unlock(tp);
11255
11256         if (irq_sync && !err)
11257                 tg3_phy_start(tp);
11258
11259         return err;
11260 }
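
/*
 * [Editor's sketch -- not part of tg3.c]  The validation at the top of
 * tg3_set_ringparam() guarantees the tx ring can always hold at least one
 * maximally fragmented skb (three of them with the TSO_BUG workaround).
 * A compact restatement; the numeric values are assumptions standing in
 * for MAX_SKB_FRAGS and TG3_TX_RING_SIZE.
 */
#if 0	/* illustrative only */
#include <stdbool.h>

#define EX_MAX_SKB_FRAGS	17	/* assumed typical MAX_SKB_FRAGS */
#define EX_TX_RING_SIZE		512	/* assumed TG3_TX_RING_SIZE */

static bool tx_pending_ok(unsigned int tx_pending, bool tso_bug)
{
	if (tx_pending > EX_TX_RING_SIZE - 1)
		return false;			/* larger than the ring */
	if (tx_pending <= EX_MAX_SKB_FRAGS)
		return false;			/* one fragmented skb can't fit */
	if (tso_bug && tx_pending <= EX_MAX_SKB_FRAGS * 3)
		return false;			/* TSO workaround needs 3x room */
	return true;
}
#endif
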
11261
11262 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11263 {
11264         struct tg3 *tp = netdev_priv(dev);
11265
11266         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
11267
11268         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
11269                 epause->rx_pause = 1;
11270         else
11271                 epause->rx_pause = 0;
11272
11273         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
11274                 epause->tx_pause = 1;
11275         else
11276                 epause->tx_pause = 0;
11277 }
11278
11279 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
11280 {
11281         struct tg3 *tp = netdev_priv(dev);
11282         int err = 0;
11283
11284         if (tg3_flag(tp, USE_PHYLIB)) {
11285                 u32 newadv;
11286                 struct phy_device *phydev;
11287
11288                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11289
11290                 if (!(phydev->supported & SUPPORTED_Pause) ||
11291                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
11292                      (epause->rx_pause != epause->tx_pause)))
11293                         return -EINVAL;
11294
11295                 tp->link_config.flowctrl = 0;
11296                 if (epause->rx_pause) {
11297                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11298
11299                         if (epause->tx_pause) {
11300                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
11301                                 newadv = ADVERTISED_Pause;
11302                         } else
11303                                 newadv = ADVERTISED_Pause |
11304                                          ADVERTISED_Asym_Pause;
11305                 } else if (epause->tx_pause) {
11306                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11307                         newadv = ADVERTISED_Asym_Pause;
11308                 } else
11309                         newadv = 0;
11310
11311                 if (epause->autoneg)
11312                         tg3_flag_set(tp, PAUSE_AUTONEG);
11313                 else
11314                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11315
11316                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
11317                         u32 oldadv = phydev->advertising &
11318                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
11319                         if (oldadv != newadv) {
11320                                 phydev->advertising &=
11321                                         ~(ADVERTISED_Pause |
11322                                           ADVERTISED_Asym_Pause);
11323                                 phydev->advertising |= newadv;
11324                                 if (phydev->autoneg) {
11325                                         /*
11326                                          * Always renegotiate the link to
11327                                          * inform our link partner of our
11328                                          * flow control settings, even if the
11329                                          * flow control is forced.  Let
11330                                          * tg3_adjust_link() do the final
11331                                          * flow control setup.
11332                                          */
11333                                         return phy_start_aneg(phydev);
11334                                 }
11335                         }
11336
11337                         if (!epause->autoneg)
11338                                 tg3_setup_flow_control(tp, 0, 0);
11339                 } else {
11340                         tp->link_config.advertising &=
11341                                         ~(ADVERTISED_Pause |
11342                                           ADVERTISED_Asym_Pause);
11343                         tp->link_config.advertising |= newadv;
11344                 }
11345         } else {
11346                 int irq_sync = 0;
11347
11348                 if (netif_running(dev)) {
11349                         tg3_netif_stop(tp);
11350                         irq_sync = 1;
11351                 }
11352
11353                 tg3_full_lock(tp, irq_sync);
11354
11355                 if (epause->autoneg)
11356                         tg3_flag_set(tp, PAUSE_AUTONEG);
11357                 else
11358                         tg3_flag_clear(tp, PAUSE_AUTONEG);
11359                 if (epause->rx_pause)
11360                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
11361                 else
11362                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
11363                 if (epause->tx_pause)
11364                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
11365                 else
11366                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
11367
11368                 if (netif_running(dev)) {
11369                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11370                         err = tg3_restart_hw(tp, 1);
11371                         if (!err)
11372                                 tg3_netif_start(tp);
11373                 }
11374
11375                 tg3_full_unlock(tp);
11376         }
11377
11378         return err;
11379 }
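
/*
 * [Editor's sketch -- not part of tg3.c]  The rx/tx pause pair chosen above
 * maps onto the two IEEE pause advertisement bits exactly as coded in the
 * phylib branch.  A side-by-side restatement; the ADV_* macros are stand-ins
 * for ADVERTISED_Pause / ADVERTISED_Asym_Pause.
 */
#if 0	/* illustrative only */
#include <stdint.h>

#define ADV_PAUSE	0x1	/* stands in for ADVERTISED_Pause */
#define ADV_ASYM	0x2	/* stands in for ADVERTISED_Asym_Pause */

/* rx+tx -> Pause; rx only -> Pause|Asym; tx only -> Asym; none -> 0 */
static uint32_t pause_to_adv(int rx_pause, int tx_pause)
{
	if (rx_pause && tx_pause)
		return ADV_PAUSE;
	if (rx_pause)
		return ADV_PAUSE | ADV_ASYM;
	if (tx_pause)
		return ADV_ASYM;
	return 0;
}
#endif
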
11380
11381 static int tg3_get_sset_count(struct net_device *dev, int sset)
11382 {
11383         switch (sset) {
11384         case ETH_SS_TEST:
11385                 return TG3_NUM_TEST;
11386         case ETH_SS_STATS:
11387                 return TG3_NUM_STATS;
11388         default:
11389                 return -EOPNOTSUPP;
11390         }
11391 }
11392
11393 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
11394                          u32 *rules __always_unused)
11395 {
11396         struct tg3 *tp = netdev_priv(dev);
11397
11398         if (!tg3_flag(tp, SUPPORT_MSIX))
11399                 return -EOPNOTSUPP;
11400
11401         switch (info->cmd) {
11402         case ETHTOOL_GRXRINGS:
11403                 if (netif_running(tp->dev))
11404                         info->data = tp->rxq_cnt;
11405                 else {
11406                         info->data = num_online_cpus();
11407                         if (info->data > TG3_RSS_MAX_NUM_QS)
11408                                 info->data = TG3_RSS_MAX_NUM_QS;
11409                 }
11410
11411                 /* The first interrupt vector only
11412                  * handles link interrupts.
11413                  */
11414                 info->data -= 1;
11415                 return 0;
11416
11417         default:
11418                 return -EOPNOTSUPP;
11419         }
11420 }
11421
11422 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
11423 {
11424         u32 size = 0;
11425         struct tg3 *tp = netdev_priv(dev);
11426
11427         if (tg3_flag(tp, SUPPORT_MSIX))
11428                 size = TG3_RSS_INDIR_TBL_SIZE;
11429
11430         return size;
11431 }
11432
11433 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
11434 {
11435         struct tg3 *tp = netdev_priv(dev);
11436         int i;
11437
11438         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11439                 indir[i] = tp->rss_ind_tbl[i];
11440
11441         return 0;
11442 }
11443
11444 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
11445 {
11446         struct tg3 *tp = netdev_priv(dev);
11447         size_t i;
11448
11449         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
11450                 tp->rss_ind_tbl[i] = indir[i];
11451
11452         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
11453                 return 0;
11454
11455         /* It is legal to write the indirection
11456          * table while the device is running.
11457          */
11458         tg3_full_lock(tp, 0);
11459         tg3_rss_write_indir_tbl(tp);
11460         tg3_full_unlock(tp);
11461
11462         return 0;
11463 }
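
/*
 * [Editor's sketch -- not part of tg3.c]  A caller of the op above typically
 * spreads table entries evenly over the active rx queues; the modulo fill
 * below is the conventional layout.  TBL_SIZE is an assumption standing in
 * for TG3_RSS_INDIR_TBL_SIZE.
 */
#if 0	/* illustrative only */
#define TBL_SIZE	128	/* assumed TG3_RSS_INDIR_TBL_SIZE */

static void fill_indir_even(unsigned int *indir, unsigned int nrxq)
{
	unsigned int i;

	if (!nrxq)
		return;			/* need at least one rx queue */

	for (i = 0; i < TBL_SIZE; i++)
		indir[i] = i % nrxq;	/* queue 0, 1, ..., nrxq-1, 0, ... */
}
#endif
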
11464
11465 static void tg3_get_channels(struct net_device *dev,
11466                              struct ethtool_channels *channel)
11467 {
11468         struct tg3 *tp = netdev_priv(dev);
11469         u32 deflt_qs = netif_get_num_default_rss_queues();
11470
11471         channel->max_rx = tp->rxq_max;
11472         channel->max_tx = tp->txq_max;
11473
11474         if (netif_running(dev)) {
11475                 channel->rx_count = tp->rxq_cnt;
11476                 channel->tx_count = tp->txq_cnt;
11477         } else {
11478                 if (tp->rxq_req)
11479                         channel->rx_count = tp->rxq_req;
11480                 else
11481                         channel->rx_count = min(deflt_qs, tp->rxq_max);
11482
11483                 if (tp->txq_req)
11484                         channel->tx_count = tp->txq_req;
11485                 else
11486                         channel->tx_count = min(deflt_qs, tp->txq_max);
11487         }
11488 }
11489
11490 static int tg3_set_channels(struct net_device *dev,
11491                             struct ethtool_channels *channel)
11492 {
11493         struct tg3 *tp = netdev_priv(dev);
11494
11495         if (!tg3_flag(tp, SUPPORT_MSIX))
11496                 return -EOPNOTSUPP;
11497
11498         if (channel->rx_count > tp->rxq_max ||
11499             channel->tx_count > tp->txq_max)
11500                 return -EINVAL;
11501
11502         tp->rxq_req = channel->rx_count;
11503         tp->txq_req = channel->tx_count;
11504
11505         if (!netif_running(dev))
11506                 return 0;
11507
11508         tg3_stop(tp);
11509
11510         tg3_carrier_off(tp);
11511
11512         tg3_start(tp, true, false, false);
11513
11514         return 0;
11515 }
11516
11517 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11518 {
11519         switch (stringset) {
11520         case ETH_SS_STATS:
11521                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
11522                 break;
11523         case ETH_SS_TEST:
11524                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
11525                 break;
11526         default:
11527                 WARN_ON(1);     /* unknown stringset; ideally a WARN() with a message */
11528                 break;
11529         }
11530 }
11531
11532 static int tg3_set_phys_id(struct net_device *dev,
11533                             enum ethtool_phys_id_state state)
11534 {
11535         struct tg3 *tp = netdev_priv(dev);
11536
11537         if (!netif_running(tp->dev))
11538                 return -EAGAIN;
11539
11540         switch (state) {
11541         case ETHTOOL_ID_ACTIVE:
11542                 return 1;       /* cycle on/off once per second */
11543
11544         case ETHTOOL_ID_ON:
11545                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11546                      LED_CTRL_1000MBPS_ON |
11547                      LED_CTRL_100MBPS_ON |
11548                      LED_CTRL_10MBPS_ON |
11549                      LED_CTRL_TRAFFIC_OVERRIDE |
11550                      LED_CTRL_TRAFFIC_BLINK |
11551                      LED_CTRL_TRAFFIC_LED);
11552                 break;
11553
11554         case ETHTOOL_ID_OFF:
11555                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11556                      LED_CTRL_TRAFFIC_OVERRIDE);
11557                 break;
11558
11559         case ETHTOOL_ID_INACTIVE:
11560                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11561                 break;
11562         }
11563
11564         return 0;
11565 }
11566
11567 static void tg3_get_ethtool_stats(struct net_device *dev,
11568                                    struct ethtool_stats *estats, u64 *tmp_stats)
11569 {
11570         struct tg3 *tp = netdev_priv(dev);
11571
11572         if (tp->hw_stats)
11573                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11574         else
11575                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11576 }
11577
11578 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11579 {
11580         int i;
11581         __be32 *buf;
11582         u32 offset = 0, len = 0;
11583         u32 magic, val;
11584
11585         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11586                 return NULL;
11587
11588         if (magic == TG3_EEPROM_MAGIC) {
11589                 for (offset = TG3_NVM_DIR_START;
11590                      offset < TG3_NVM_DIR_END;
11591                      offset += TG3_NVM_DIRENT_SIZE) {
11592                         if (tg3_nvram_read(tp, offset, &val))
11593                                 return NULL;
11594
11595                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11596                             TG3_NVM_DIRTYPE_EXTVPD)
11597                                 break;
11598                 }
11599
11600                 if (offset != TG3_NVM_DIR_END) {
11601                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11602                         if (tg3_nvram_read(tp, offset + 4, &offset))
11603                                 return NULL;
11604
11605                         offset = tg3_nvram_logical_addr(tp, offset);
11606                 }
11607         }
11608
11609         if (!offset || !len) {
11610                 offset = TG3_NVM_VPD_OFF;
11611                 len = TG3_NVM_VPD_LEN;
11612         }
11613
11614         buf = kmalloc(len, GFP_KERNEL);
11615         if (buf == NULL)
11616                 return NULL;
11617
11618         if (magic == TG3_EEPROM_MAGIC) {
11619                 for (i = 0; i < len; i += 4) {
11620                         /* The data is stored little-endian in NVRAM.  Use
11621                          * the big-endian read routines so each word keeps
11622                          * the same byte order in memory as it has in
11623                          * NVRAM. */
11624                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11625                                 goto error;
11626                 }
11627         } else {
11628                 u8 *ptr;
11629                 ssize_t cnt;
11630                 unsigned int pos = 0;
11631
11632                 ptr = (u8 *)&buf[0];
11633                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11634                         cnt = pci_read_vpd(tp->pdev, pos,
11635                                            len - pos, ptr);
11636                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11637                                 cnt = 0;
11638                         else if (cnt < 0)
11639                                 goto error;
11640                 }
11641                 if (pos != len)
11642                         goto error;
11643         }
11644
11645         *vpdlen = len;
11646
11647         return buf;
11648
11649 error:
11650         kfree(buf);
11651         return NULL;
11652 }
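
/*
 * [Editor's sketch -- not part of tg3.c]  The PCI branch above uses a
 * bounded-retry read loop: at most three partial reads, with a timeout or
 * interrupt counted as zero progress and any other error fatal.  The shape
 * of that loop in isolation; read_fn is a hypothetical stand-in for
 * pci_read_vpd().
 */
#if 0	/* illustrative only */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

static bool read_all(ssize_t (*read_fn)(unsigned int pos, size_t count,
					void *buf),
		     void *buf, size_t len)
{
	size_t pos = 0;
	int tries;

	for (tries = 0; pos < len && tries < 3; tries++) {
		ssize_t cnt = read_fn(pos, len - pos, (char *)buf + pos);

		if (cnt == -ETIMEDOUT || cnt == -EINTR)
			cnt = 0;		/* transient: costs one try */
		else if (cnt < 0)
			return false;		/* hard error */
		pos += cnt;
	}
	return pos == len;			/* whole range covered? */
}
#endif
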
11653
11654 #define NVRAM_TEST_SIZE 0x100
11655 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11656 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11657 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11658 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11659 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11660 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11661 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11662 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11663
11664 static int tg3_test_nvram(struct tg3 *tp)
11665 {
11666         u32 csum, magic, len;
11667         __be32 *buf;
11668         int i, j, k, err = 0, size;
11669
11670         if (tg3_flag(tp, NO_NVRAM))
11671                 return 0;
11672
11673         if (tg3_nvram_read(tp, 0, &magic) != 0)
11674                 return -EIO;
11675
11676         if (magic == TG3_EEPROM_MAGIC)
11677                 size = NVRAM_TEST_SIZE;
11678         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11679                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11680                     TG3_EEPROM_SB_FORMAT_1) {
11681                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11682                         case TG3_EEPROM_SB_REVISION_0:
11683                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11684                                 break;
11685                         case TG3_EEPROM_SB_REVISION_2:
11686                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11687                                 break;
11688                         case TG3_EEPROM_SB_REVISION_3:
11689                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11690                                 break;
11691                         case TG3_EEPROM_SB_REVISION_4:
11692                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11693                                 break;
11694                         case TG3_EEPROM_SB_REVISION_5:
11695                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11696                                 break;
11697                         case TG3_EEPROM_SB_REVISION_6:
11698                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11699                                 break;
11700                         default:
11701                                 return -EIO;
11702                         }
11703                 } else
11704                         return 0;
11705         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11706                 size = NVRAM_SELFBOOT_HW_SIZE;
11707         else
11708                 return -EIO;
11709
11710         buf = kmalloc(size, GFP_KERNEL);
11711         if (buf == NULL)
11712                 return -ENOMEM;
11713
11714         err = -EIO;
11715         for (i = 0, j = 0; i < size; i += 4, j++) {
11716                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11717                 if (err)
11718                         break;
11719         }
11720         if (i < size)
11721                 goto out;
11722
11723         /* Selfboot format */
11724         magic = be32_to_cpu(buf[0]);
11725         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11726             TG3_EEPROM_MAGIC_FW) {
11727                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11728
11729                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11730                     TG3_EEPROM_SB_REVISION_2) {
11731                         /* For rev 2, the csum doesn't include the MBA. */
11732                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11733                                 csum8 += buf8[i];
11734                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11735                                 csum8 += buf8[i];
11736                 } else {
11737                         for (i = 0; i < size; i++)
11738                                 csum8 += buf8[i];
11739                 }
11740
11741                 if (csum8 == 0) {
11742                         err = 0;
11743                         goto out;
11744                 }
11745
11746                 err = -EIO;
11747                 goto out;
11748         }
11749
11750         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11751             TG3_EEPROM_MAGIC_HW) {
11752                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11753                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11754                 u8 *buf8 = (u8 *) buf;
11755
11756                 /* Separate the parity bits and the data bytes.  */
11757                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11758                         if ((i == 0) || (i == 8)) {
11759                                 int l;
11760                                 u8 msk;
11761
11762                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11763                                         parity[k++] = buf8[i] & msk;
11764                                 i++;
11765                         } else if (i == 16) {
11766                                 int l;
11767                                 u8 msk;
11768
11769                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11770                                         parity[k++] = buf8[i] & msk;
11771                                 i++;
11772
11773                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11774                                         parity[k++] = buf8[i] & msk;
11775                                 i++;
11776                         }
11777                         data[j++] = buf8[i];
11778                 }
11779
11780                 err = -EIO;
11781                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11782                         u8 hw8 = hweight8(data[i]);
11783
11784                         if ((hw8 & 0x1) && parity[i])
11785                                 goto out;
11786                         else if (!(hw8 & 0x1) && !parity[i])
11787                                 goto out;
11788                 }
11789                 err = 0;
11790                 goto out;
11791         }
11792
11793         err = -EIO;
11794
11795         /* Bootstrap checksum at offset 0x10 */
11796         csum = calc_crc((unsigned char *) buf, 0x10);
11797         if (csum != le32_to_cpu(buf[0x10/4]))
11798                 goto out;
11799
11800         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11801         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11802         if (csum != le32_to_cpu(buf[0xfc/4]))
11803                 goto out;
11804
11805         kfree(buf);
11806
11807         buf = tg3_vpd_readblock(tp, &len);
11808         if (!buf)
11809                 return -ENOMEM;
11810
11811         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11812         if (i > 0) {
11813                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11814                 if (j < 0)
11815                         goto out;
11816
11817                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11818                         goto out;
11819
11820                 i += PCI_VPD_LRDT_TAG_SIZE;
11821                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11822                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11823                 if (j > 0) {
11824                         u8 csum8 = 0;
11825
11826                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11827
11828                         for (i = 0; i <= j; i++)
11829                                 csum8 += ((u8 *)buf)[i];
11830
11831                         if (csum8)
11832                                 goto out;
11833                 }
11834         }
11835
11836         err = 0;
11837
11838 out:
11839         kfree(buf);
11840         return err;
11841 }
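
/*
 * [Editor's sketch -- not part of tg3.c]  The selfboot-HW branch above
 * enforces odd parity per data byte: each byte plus its stored parity bit
 * must carry an odd number of ones, which is what the paired hweight8()
 * checks express.  The same predicate in isolation; the GCC popcount
 * builtin stands in for hweight8().
 */
#if 0	/* illustrative only */
#include <stdbool.h>
#include <stdint.h>

static bool parity_ok(uint8_t data, bool parity_bit_set)
{
	unsigned int ones = __builtin_popcount(data);	/* like hweight8() */

	/* Odd popcount needs the parity bit clear; even needs it set. */
	return (ones & 1) ? !parity_bit_set : parity_bit_set;
}
#endif
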
11842
11843 #define TG3_SERDES_TIMEOUT_SEC  2
11844 #define TG3_COPPER_TIMEOUT_SEC  6
11845
11846 static int tg3_test_link(struct tg3 *tp)
11847 {
11848         int i, max;
11849
11850         if (!netif_running(tp->dev))
11851                 return -ENODEV;
11852
11853         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11854                 max = TG3_SERDES_TIMEOUT_SEC;
11855         else
11856                 max = TG3_COPPER_TIMEOUT_SEC;
11857
11858         for (i = 0; i < max; i++) {
11859                 if (tp->link_up)
11860                         return 0;
11861
11862                 if (msleep_interruptible(1000))
11863                         break;
11864         }
11865
11866         return -EIO;
11867 }
11868
11869 /* Only test the commonly used registers */
11870 static int tg3_test_registers(struct tg3 *tp)
11871 {
11872         int i, is_5705, is_5750;
11873         u32 offset, read_mask, write_mask, val, save_val, read_val;
11874         static struct {
11875                 u16 offset;
11876                 u16 flags;
11877 #define TG3_FL_5705     0x1
11878 #define TG3_FL_NOT_5705 0x2
11879 #define TG3_FL_NOT_5788 0x4
11880 #define TG3_FL_NOT_5750 0x8
11881                 u32 read_mask;
11882                 u32 write_mask;
11883         } reg_tbl[] = {
11884                 /* MAC Control Registers */
11885                 { MAC_MODE, TG3_FL_NOT_5705,
11886                         0x00000000, 0x00ef6f8c },
11887                 { MAC_MODE, TG3_FL_5705,
11888                         0x00000000, 0x01ef6b8c },
11889                 { MAC_STATUS, TG3_FL_NOT_5705,
11890                         0x03800107, 0x00000000 },
11891                 { MAC_STATUS, TG3_FL_5705,
11892                         0x03800100, 0x00000000 },
11893                 { MAC_ADDR_0_HIGH, 0x0000,
11894                         0x00000000, 0x0000ffff },
11895                 { MAC_ADDR_0_LOW, 0x0000,
11896                         0x00000000, 0xffffffff },
11897                 { MAC_RX_MTU_SIZE, 0x0000,
11898                         0x00000000, 0x0000ffff },
11899                 { MAC_TX_MODE, 0x0000,
11900                         0x00000000, 0x00000070 },
11901                 { MAC_TX_LENGTHS, 0x0000,
11902                         0x00000000, 0x00003fff },
11903                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11904                         0x00000000, 0x000007fc },
11905                 { MAC_RX_MODE, TG3_FL_5705,
11906                         0x00000000, 0x000007dc },
11907                 { MAC_HASH_REG_0, 0x0000,
11908                         0x00000000, 0xffffffff },
11909                 { MAC_HASH_REG_1, 0x0000,
11910                         0x00000000, 0xffffffff },
11911                 { MAC_HASH_REG_2, 0x0000,
11912                         0x00000000, 0xffffffff },
11913                 { MAC_HASH_REG_3, 0x0000,
11914                         0x00000000, 0xffffffff },
11915
11916                 /* Receive Data and Receive BD Initiator Control Registers. */
11917                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11918                         0x00000000, 0xffffffff },
11919                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11920                         0x00000000, 0xffffffff },
11921                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11922                         0x00000000, 0x00000003 },
11923                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11924                         0x00000000, 0xffffffff },
11925                 { RCVDBDI_STD_BD+0, 0x0000,
11926                         0x00000000, 0xffffffff },
11927                 { RCVDBDI_STD_BD+4, 0x0000,
11928                         0x00000000, 0xffffffff },
11929                 { RCVDBDI_STD_BD+8, 0x0000,
11930                         0x00000000, 0xffff0002 },
11931                 { RCVDBDI_STD_BD+0xc, 0x0000,
11932                         0x00000000, 0xffffffff },
11933
11934                 /* Receive BD Initiator Control Registers. */
11935                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11936                         0x00000000, 0xffffffff },
11937                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11938                         0x00000000, 0x000003ff },
11939                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11940                         0x00000000, 0xffffffff },
11941
11942                 /* Host Coalescing Control Registers. */
11943                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11944                         0x00000000, 0x00000004 },
11945                 { HOSTCC_MODE, TG3_FL_5705,
11946                         0x00000000, 0x000000f6 },
11947                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11948                         0x00000000, 0xffffffff },
11949                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11950                         0x00000000, 0x000003ff },
11951                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11952                         0x00000000, 0xffffffff },
11953                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11954                         0x00000000, 0x000003ff },
11955                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11956                         0x00000000, 0xffffffff },
11957                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11958                         0x00000000, 0x000000ff },
11959                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11960                         0x00000000, 0xffffffff },
11961                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11962                         0x00000000, 0x000000ff },
11963                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11964                         0x00000000, 0xffffffff },
11965                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11966                         0x00000000, 0xffffffff },
11967                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11968                         0x00000000, 0xffffffff },
11969                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11970                         0x00000000, 0x000000ff },
11971                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11972                         0x00000000, 0xffffffff },
11973                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11974                         0x00000000, 0x000000ff },
11975                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11976                         0x00000000, 0xffffffff },
11977                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11978                         0x00000000, 0xffffffff },
11979                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11980                         0x00000000, 0xffffffff },
11981                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11982                         0x00000000, 0xffffffff },
11983                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11984                         0x00000000, 0xffffffff },
11985                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11986                         0xffffffff, 0x00000000 },
11987                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11988                         0xffffffff, 0x00000000 },
11989
11990                 /* Buffer Manager Control Registers. */
11991                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11992                         0x00000000, 0x007fff80 },
11993                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11994                         0x00000000, 0x007fffff },
11995                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11996                         0x00000000, 0x0000003f },
11997                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11998                         0x00000000, 0x000001ff },
11999                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12000                         0x00000000, 0x000001ff },
12001                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12002                         0xffffffff, 0x00000000 },
12003                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12004                         0xffffffff, 0x00000000 },
12005
12006                 /* Mailbox Registers */
12007                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12008                         0x00000000, 0x000001ff },
12009                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12010                         0x00000000, 0x000001ff },
12011                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12012                         0x00000000, 0x000007ff },
12013                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12014                         0x00000000, 0x000001ff },
12015
12016                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12017         };
12018
12019         is_5705 = is_5750 = 0;
12020         if (tg3_flag(tp, 5705_PLUS)) {
12021                 is_5705 = 1;
12022                 if (tg3_flag(tp, 5750_PLUS))
12023                         is_5750 = 1;
12024         }
12025
12026         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12027                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12028                         continue;
12029
12030                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12031                         continue;
12032
12033                 if (tg3_flag(tp, IS_5788) &&
12034                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12035                         continue;
12036
12037                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12038                         continue;
12039
12040                 offset = (u32) reg_tbl[i].offset;
12041                 read_mask = reg_tbl[i].read_mask;
12042                 write_mask = reg_tbl[i].write_mask;
12043
12044                 /* Save the original register content */
12045                 save_val = tr32(offset);
12046
12047                 /* Determine the read-only value. */
12048                 read_val = save_val & read_mask;
12049
12050                 /* Write zero to the register, then make sure the read-only bits
12051                  * are not changed and the read/write bits are all zeros.
12052                  */
12053                 tw32(offset, 0);
12054
12055                 val = tr32(offset);
12056
12057                 /* Test the read-only and read/write bits. */
12058                 if (((val & read_mask) != read_val) || (val & write_mask))
12059                         goto out;
12060
12061                 /* Write ones to all the bits defined by read_mask and
12062                  * write_mask, then make sure the read-only bits are not
12063                  * changed and the read/write bits are all ones.
12064                  */
12065                 tw32(offset, read_mask | write_mask);
12066
12067                 val = tr32(offset);
12068
12069                 /* Test the read-only bits. */
12070                 if ((val & read_mask) != read_val)
12071                         goto out;
12072
12073                 /* Test the read/write bits. */
12074                 if ((val & write_mask) != write_mask)
12075                         goto out;
12076
12077                 tw32(offset, save_val);
12078         }
12079
12080         return 0;
12081
12082 out:
12083         if (netif_msg_hw(tp))
12084                 netdev_err(tp->dev,
12085                            "Register test failed at offset %x\n", offset);
12086         tw32(offset, save_val);
12087         return -EIO;
12088 }
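
/*
 * [Editor's sketch -- not part of tg3.c]  The loop above is an instance of
 * the classic mask-driven register probe: write all zeros, then all writable
 * ones, and require read-only bits to survive both writes while read/write
 * bits track what was written.  Generic form, with rd()/wr() as hypothetical
 * MMIO accessors standing in for tr32()/tw32():
 */
#if 0	/* illustrative only */
#include <stdbool.h>
#include <stdint.h>

static bool reg_probe(uint32_t (*rd)(uint32_t),
		      void (*wr)(uint32_t, uint32_t),
		      uint32_t off, uint32_t read_mask, uint32_t write_mask)
{
	uint32_t save = rd(off);
	uint32_t ro = save & read_mask;		/* expected read-only bits */
	bool ok = true;
	uint32_t val;

	wr(off, 0);
	val = rd(off);
	if ((val & read_mask) != ro || (val & write_mask))
		ok = false;

	wr(off, read_mask | write_mask);
	val = rd(off);
	if ((val & read_mask) != ro || (val & write_mask) != write_mask)
		ok = false;

	wr(off, save);				/* always restore */
	return ok;
}
#endif
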
12089
12090 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12091 {
12092         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12093         int i;
12094         u32 j;
12095
12096         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12097                 for (j = 0; j < len; j += 4) {
12098                         u32 val;
12099
12100                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12101                         tg3_read_mem(tp, offset + j, &val);
12102                         if (val != test_pattern[i])
12103                                 return -EIO;
12104                 }
12105         }
12106         return 0;
12107 }
12108
12109 static int tg3_test_memory(struct tg3 *tp)
12110 {
12111         static struct mem_entry {
12112                 u32 offset;
12113                 u32 len;
12114         } mem_tbl_570x[] = {
12115                 { 0x00000000, 0x00b50},
12116                 { 0x00002000, 0x1c000},
12117                 { 0xffffffff, 0x00000}
12118         }, mem_tbl_5705[] = {
12119                 { 0x00000100, 0x0000c},
12120                 { 0x00000200, 0x00008},
12121                 { 0x00004000, 0x00800},
12122                 { 0x00006000, 0x01000},
12123                 { 0x00008000, 0x02000},
12124                 { 0x00010000, 0x0e000},
12125                 { 0xffffffff, 0x00000}
12126         }, mem_tbl_5755[] = {
12127                 { 0x00000200, 0x00008},
12128                 { 0x00004000, 0x00800},
12129                 { 0x00006000, 0x00800},
12130                 { 0x00008000, 0x02000},
12131                 { 0x00010000, 0x0c000},
12132                 { 0xffffffff, 0x00000}
12133         }, mem_tbl_5906[] = {
12134                 { 0x00000200, 0x00008},
12135                 { 0x00004000, 0x00400},
12136                 { 0x00006000, 0x00400},
12137                 { 0x00008000, 0x01000},
12138                 { 0x00010000, 0x01000},
12139                 { 0xffffffff, 0x00000}
12140         }, mem_tbl_5717[] = {
12141                 { 0x00000200, 0x00008},
12142                 { 0x00010000, 0x0a000},
12143                 { 0x00020000, 0x13c00},
12144                 { 0xffffffff, 0x00000}
12145         }, mem_tbl_57765[] = {
12146                 { 0x00000200, 0x00008},
12147                 { 0x00004000, 0x00800},
12148                 { 0x00006000, 0x09800},
12149                 { 0x00010000, 0x0a000},
12150                 { 0xffffffff, 0x00000}
12151         };
12152         struct mem_entry *mem_tbl;
12153         int err = 0;
12154         int i;
12155
12156         if (tg3_flag(tp, 5717_PLUS))
12157                 mem_tbl = mem_tbl_5717;
12158         else if (tg3_flag(tp, 57765_CLASS))
12159                 mem_tbl = mem_tbl_57765;
12160         else if (tg3_flag(tp, 5755_PLUS))
12161                 mem_tbl = mem_tbl_5755;
12162         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12163                 mem_tbl = mem_tbl_5906;
12164         else if (tg3_flag(tp, 5705_PLUS))
12165                 mem_tbl = mem_tbl_5705;
12166         else
12167                 mem_tbl = mem_tbl_570x;
12168
12169         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
12170                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
12171                 if (err)
12172                         break;
12173         }
12174
12175         return err;
12176 }
12177
12178 #define TG3_TSO_MSS             500
12179
12180 #define TG3_TSO_IP_HDR_LEN      20
12181 #define TG3_TSO_TCP_HDR_LEN     20
12182 #define TG3_TSO_TCP_OPT_LEN     12
12183
12184 static const u8 tg3_tso_header[] = {
12185 0x08, 0x00,
12186 0x45, 0x00, 0x00, 0x00,
12187 0x00, 0x00, 0x40, 0x00,
12188 0x40, 0x06, 0x00, 0x00,
12189 0x0a, 0x00, 0x00, 0x01,
12190 0x0a, 0x00, 0x00, 0x02,
12191 0x0d, 0x00, 0xe0, 0x00,
12192 0x00, 0x00, 0x01, 0x00,
12193 0x00, 0x00, 0x02, 0x00,
12194 0x80, 0x10, 0x10, 0x00,
12195 0x14, 0x09, 0x00, 0x00,
12196 0x01, 0x01, 0x08, 0x0a,
12197 0x11, 0x11, 0x11, 0x11,
12198 0x11, 0x11, 0x11, 0x11,
12199 };
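
/*
 * [Editor's note, derived from the bytes above]  tg3_tso_header decodes as:
 * bytes 0-1 are the Ethertype (0x0800, IPv4); the next 20 bytes are an IPv4
 * header (version 4, IHL 5, DF set, TTL 64, protocol 6 = TCP, with tot_len
 * and checksum left zero to be filled in) from 10.0.0.1 to 10.0.0.2; the
 * final 32 bytes are a TCP header with data offset 8, i.e. 20 fixed bytes
 * plus 12 bytes of options (NOP, NOP, timestamp option kind 8 length 10),
 * matching TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN + TG3_TSO_TCP_OPT_LEN.
 */
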
12200
12201 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
12202 {
12203         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
12204         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
12205         u32 budget;
12206         struct sk_buff *skb;
12207         u8 *tx_data, *rx_data;
12208         dma_addr_t map;
12209         int num_pkts, tx_len, rx_len, i, err;
12210         struct tg3_rx_buffer_desc *desc;
12211         struct tg3_napi *tnapi, *rnapi;
12212         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
12213
12214         tnapi = &tp->napi[0];
12215         rnapi = &tp->napi[0];
12216         if (tp->irq_cnt > 1) {
12217                 if (tg3_flag(tp, ENABLE_RSS))
12218                         rnapi = &tp->napi[1];
12219                 if (tg3_flag(tp, ENABLE_TSS))
12220                         tnapi = &tp->napi[1];
12221         }
12222         coal_now = tnapi->coal_now | rnapi->coal_now;
12223
12224         err = -EIO;
12225
12226         tx_len = pktsz;
12227         skb = netdev_alloc_skb(tp->dev, tx_len);
12228         if (!skb)
12229                 return -ENOMEM;
12230
12231         tx_data = skb_put(skb, tx_len);
12232         memcpy(tx_data, tp->dev->dev_addr, 6);
12233         memset(tx_data + 6, 0x0, 8);
12234
12235         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
12236
12237         if (tso_loopback) {
12238                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
12239
12240                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
12241                               TG3_TSO_TCP_OPT_LEN;
12242
12243                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
12244                        sizeof(tg3_tso_header));
12245                 mss = TG3_TSO_MSS;
12246
12247                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
12248                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
12249
12250                 /* Set the total length field in the IP header */
12251                 iph->tot_len = htons((u16)(mss + hdr_len));
12252
12253                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
12254                               TXD_FLAG_CPU_POST_DMA);
12255
12256                 if (tg3_flag(tp, HW_TSO_1) ||
12257                     tg3_flag(tp, HW_TSO_2) ||
12258                     tg3_flag(tp, HW_TSO_3)) {
12259                         struct tcphdr *th;
12260                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
12261                         th = (struct tcphdr *)&tx_data[val];
12262                         th->check = 0;
12263                 } else
12264                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
12265
12266                 if (tg3_flag(tp, HW_TSO_3)) {
12267                         mss |= (hdr_len & 0xc) << 12;
12268                         if (hdr_len & 0x10)
12269                                 base_flags |= 0x00000010;
12270                         base_flags |= (hdr_len & 0x3e0) << 5;
12271                 } else if (tg3_flag(tp, HW_TSO_2))
12272                         mss |= hdr_len << 9;
12273                 else if (tg3_flag(tp, HW_TSO_1) ||
12274                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
12275                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
12276                 } else {
12277                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
12278                 }
12279
12280                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
12281         } else {
12282                 num_pkts = 1;
12283                 data_off = ETH_HLEN;
12284
12285                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
12286                     tx_len > VLAN_ETH_FRAME_LEN)
12287                         base_flags |= TXD_FLAG_JMB_PKT;
12288         }
12289
12290         for (i = data_off; i < tx_len; i++)
12291                 tx_data[i] = (u8) (i & 0xff);
12292
12293         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
12294         if (pci_dma_mapping_error(tp->pdev, map)) {
12295                 dev_kfree_skb(skb);
12296                 return -EIO;
12297         }
12298
12299         val = tnapi->tx_prod;
12300         tnapi->tx_buffers[val].skb = skb;
12301         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
12302
12303         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12304                rnapi->coal_now);
12305
12306         udelay(10);
12307
12308         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
12309
12310         budget = tg3_tx_avail(tnapi);
12311         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
12312                             base_flags | TXD_FLAG_END, mss, 0)) {
12313                 tnapi->tx_buffers[val].skb = NULL;
12314                 dev_kfree_skb(skb);
12315                 return -EIO;
12316         }
12317
12318         tnapi->tx_prod++;
12319
12320         /* Sync BD data before updating mailbox */
12321         wmb();
12322
12323         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
12324         tr32_mailbox(tnapi->prodmbox);
12325
12326         udelay(10);
12327
12328         /* Poll for up to 350 usec to allow enough time on some 10/100 Mbps devices.  */
12329         for (i = 0; i < 35; i++) {
12330                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
12331                        coal_now);
12332
12333                 udelay(10);
12334
12335                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
12336                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
12337                 if ((tx_idx == tnapi->tx_prod) &&
12338                     (rx_idx == (rx_start_idx + num_pkts)))
12339                         break;
12340         }
12341
12342         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
12343         dev_kfree_skb(skb);
12344
12345         if (tx_idx != tnapi->tx_prod)
12346                 goto out;
12347
12348         if (rx_idx != rx_start_idx + num_pkts)
12349                 goto out;
12350
12351         val = data_off;
12352         while (rx_idx != rx_start_idx) {
12353                 desc = &rnapi->rx_rcb[rx_start_idx++];
12354                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
12355                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
12356
12357                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
12358                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
12359                         goto out;
12360
12361                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
12362                          - ETH_FCS_LEN;
12363
12364                 if (!tso_loopback) {
12365                         if (rx_len != tx_len)
12366                                 goto out;
12367
12368                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
12369                                 if (opaque_key != RXD_OPAQUE_RING_STD)
12370                                         goto out;
12371                         } else {
12372                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
12373                                         goto out;
12374                         }
12375                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
12376                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
12377                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
12378                         goto out;
12379                 }
12380
12381                 if (opaque_key == RXD_OPAQUE_RING_STD) {
12382                         rx_data = tpr->rx_std_buffers[desc_idx].data;
12383                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
12384                                              mapping);
12385                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
12386                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
12387                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
12388                                              mapping);
12389                 } else
12390                         goto out;
12391
12392                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
12393                                             PCI_DMA_FROMDEVICE);
12394
12395                 rx_data += TG3_RX_OFFSET(tp);
12396                 for (i = data_off; i < rx_len; i++, val++) {
12397                         if (*(rx_data + i) != (u8) (val & 0xff))
12398                                 goto out;
12399                 }
12400         }
12401
12402         err = 0;
12403
12404         /* tg3_free_rings will unmap and free the rx_data */
12405 out:
12406         return err;
12407 }
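
/*
 * [Editor's sketch -- not part of tg3.c]  The payload discipline used above:
 * transmit byte i carries (i & 0xff), and the receive path re-derives and
 * compares the same sequence.  Isolated single-packet form (the multi-packet
 * TSO case keeps one running counter across packets instead):
 */
#if 0	/* illustrative only */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static void pattern_fill(uint8_t *buf, size_t data_off, size_t len)
{
	size_t i;

	for (i = data_off; i < len; i++)
		buf[i] = (uint8_t)(i & 0xff);
}

static bool pattern_check(const uint8_t *buf, size_t data_off, size_t len)
{
	size_t i;

	for (i = data_off; i < len; i++)
		if (buf[i] != (uint8_t)(i & 0xff))
			return false;
	return true;
}
#endif
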
12408
12409 #define TG3_STD_LOOPBACK_FAILED         1
12410 #define TG3_JMB_LOOPBACK_FAILED         2
12411 #define TG3_TSO_LOOPBACK_FAILED         4
12412 #define TG3_LOOPBACK_FAILED \
12413         (TG3_STD_LOOPBACK_FAILED | \
12414          TG3_JMB_LOOPBACK_FAILED | \
12415          TG3_TSO_LOOPBACK_FAILED)
12416
12417 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
12418 {
12419         int err = -EIO;
12420         u32 eee_cap;
12421         u32 jmb_pkt_sz = 9000;
12422
12423         if (tp->dma_limit)
12424                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
12425
12426         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
12427         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
12428
12429         if (!netif_running(tp->dev)) {
12430                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12431                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12432                 if (do_extlpbk)
12433                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12434                 goto done;
12435         }
12436
12437         err = tg3_reset_hw(tp, 1);
12438         if (err) {
12439                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12440                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12441                 if (do_extlpbk)
12442                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
12443                 goto done;
12444         }
12445
12446         if (tg3_flag(tp, ENABLE_RSS)) {
12447                 int i;
12448
12449                 /* Reroute all rx packets to the 1st queue */
12450                 for (i = MAC_RSS_INDIR_TBL_0;
12451                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
12452                         tw32(i, 0x0);
12453         }
12454
12455         /* HW erratum - MAC loopback fails in some cases on the 5780.
12456          * Normal traffic and PHY loopback are not affected by this
12457          * erratum.  Also, the MAC loopback test is deprecated for
12458          * all newer ASIC revisions.
12459          */
12460         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
12461             !tg3_flag(tp, CPMU_PRESENT)) {
12462                 tg3_mac_loopback(tp, true);
12463
12464                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12465                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12466
12467                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12468                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12469                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12470
12471                 tg3_mac_loopback(tp, false);
12472         }
12473
12474         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
12475             !tg3_flag(tp, USE_PHYLIB)) {
12476                 int i;
12477
12478                 tg3_phy_lpbk_set(tp, 0, false);
12479
12480                 /* Wait for link */
12481                 for (i = 0; i < 100; i++) {
12482                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
12483                                 break;
12484                         mdelay(1);
12485                 }
12486
12487                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12488                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
12489                 if (tg3_flag(tp, TSO_CAPABLE) &&
12490                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12491                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
12492                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12493                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12494                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
12495
12496                 if (do_extlpbk) {
12497                         tg3_phy_lpbk_set(tp, 0, true);
12498
12499                         /* All link indications report up, but the hardware
12500                          * isn't really ready for about 20 msec.  Double it
12501                          * to be sure.
12502                          */
12503                         mdelay(40);
12504
12505                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
12506                                 data[TG3_EXT_LOOPB_TEST] |=
12507                                                         TG3_STD_LOOPBACK_FAILED;
12508                         if (tg3_flag(tp, TSO_CAPABLE) &&
12509                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
12510                                 data[TG3_EXT_LOOPB_TEST] |=
12511                                                         TG3_TSO_LOOPBACK_FAILED;
12512                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
12513                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
12514                                 data[TG3_EXT_LOOPB_TEST] |=
12515                                                         TG3_JMB_LOOPBACK_FAILED;
12516                 }
12517
12518                 /* Re-enable gphy autopowerdown. */
12519                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
12520                         tg3_phy_toggle_apd(tp, true);
12521         }
12522
12523         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
12524                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
12525
12526 done:
12527         tp->phy_flags |= eee_cap;
12528
12529         return err;
12530 }
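
/*
 * [Editor's sketch -- not part of tg3.c]  Each loopback result slot filled
 * above is a small bitmask: bit 0 standard-MTU, bit 1 jumbo, bit 2 TSO
 * (the TG3_*_LOOPBACK_FAILED values).  A decoder a test harness might use:
 */
#if 0	/* illustrative only */
#include <stdint.h>
#include <stdio.h>

static void decode_loopback(const char *name, uint64_t mask)
{
	printf("%s: std:%s jumbo:%s tso:%s\n", name,
	       (mask & 1) ? "FAIL" : "ok",	/* TG3_STD_LOOPBACK_FAILED */
	       (mask & 2) ? "FAIL" : "ok",	/* TG3_JMB_LOOPBACK_FAILED */
	       (mask & 4) ? "FAIL" : "ok");	/* TG3_TSO_LOOPBACK_FAILED */
}
#endif
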
12531
12532 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
12533                           u64 *data)
12534 {
12535         struct tg3 *tp = netdev_priv(dev);
12536         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
12537
12538         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
12539             tg3_power_up(tp)) {
12540                 etest->flags |= ETH_TEST_FL_FAILED;
12541                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
12542                 return;
12543         }
12544
12545         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
12546
12547         if (tg3_test_nvram(tp) != 0) {
12548                 etest->flags |= ETH_TEST_FL_FAILED;
12549                 data[TG3_NVRAM_TEST] = 1;
12550         }
12551         if (!doextlpbk && tg3_test_link(tp)) {
12552                 etest->flags |= ETH_TEST_FL_FAILED;
12553                 data[TG3_LINK_TEST] = 1;
12554         }
12555         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12556                 int err, err2 = 0, irq_sync = 0;
12557
12558                 if (netif_running(dev)) {
12559                         tg3_phy_stop(tp);
12560                         tg3_netif_stop(tp);
12561                         irq_sync = 1;
12562                 }
12563
12564                 tg3_full_lock(tp, irq_sync);
12565                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12566                 err = tg3_nvram_lock(tp);
12567                 tg3_halt_cpu(tp, RX_CPU_BASE);
12568                 if (!tg3_flag(tp, 5705_PLUS))
12569                         tg3_halt_cpu(tp, TX_CPU_BASE);
12570                 if (!err)
12571                         tg3_nvram_unlock(tp);
12572
12573                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12574                         tg3_phy_reset(tp);
12575
12576                 if (tg3_test_registers(tp) != 0) {
12577                         etest->flags |= ETH_TEST_FL_FAILED;
12578                         data[TG3_REGISTER_TEST] = 1;
12579                 }
12580
12581                 if (tg3_test_memory(tp) != 0) {
12582                         etest->flags |= ETH_TEST_FL_FAILED;
12583                         data[TG3_MEMORY_TEST] = 1;
12584                 }
12585
12586                 if (doextlpbk)
12587                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12588
12589                 if (tg3_test_loopback(tp, data, doextlpbk))
12590                         etest->flags |= ETH_TEST_FL_FAILED;
12591
12592                 tg3_full_unlock(tp);
12593
12594                 if (tg3_test_interrupt(tp) != 0) {
12595                         etest->flags |= ETH_TEST_FL_FAILED;
12596                         data[TG3_INTERRUPT_TEST] = 1;
12597                 }
12598
12599                 tg3_full_lock(tp, 0);
12600
12601                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12602                 if (netif_running(dev)) {
12603                         tg3_flag_set(tp, INIT_COMPLETE);
12604                         err2 = tg3_restart_hw(tp, 1);
12605                         if (!err2)
12606                                 tg3_netif_start(tp);
12607                 }
12608
12609                 tg3_full_unlock(tp);
12610
12611                 if (irq_sync && !err2)
12612                         tg3_phy_start(tp);
12613         }
12614         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12615                 tg3_power_down(tp);
12616
12617 }
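
/*
 * [Editor's sketch -- not part of tg3.c]  Driving the self-test above from
 * userspace: query testinfo_len via ETHTOOL_GDRVINFO, then issue
 * ETHTOOL_TEST with a result array of that many u64s; ETH_TEST_FL_OFFLINE
 * selects the full offline run.  Hedged example with minimal error handling.
 */
#if 0	/* illustrative only; never compiled with the driver */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int run_selftest(const char *ifname, int offline)
{
	struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
	struct ethtool_test *test;
	struct ifreq ifr;
	unsigned int i;
	int fd, ret = -1;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

	ifr.ifr_data = (char *)&drvinfo;
	if (ioctl(fd, SIOCETHTOOL, &ifr))	/* how many results? */
		goto out;

	test = calloc(1, sizeof(*test) +
			 drvinfo.testinfo_len * sizeof(__u64));
	if (!test)
		goto out;
	test->cmd = ETHTOOL_TEST;
	test->flags = offline ? ETH_TEST_FL_OFFLINE : 0;
	test->len = drvinfo.testinfo_len;

	ifr.ifr_data = (char *)test;
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (!ret)
		for (i = 0; i < test->len; i++)
			printf("test %u: %llu\n", i,
			       (unsigned long long)test->data[i]);
	free(test);
out:
	close(fd);
	return ret;
}
#endif
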
12618
12619 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12620 {
12621         struct mii_ioctl_data *data = if_mii(ifr);
12622         struct tg3 *tp = netdev_priv(dev);
12623         int err;
12624
12625         if (tg3_flag(tp, USE_PHYLIB)) {
12626                 struct phy_device *phydev;
12627                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12628                         return -EAGAIN;
12629                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12630                 return phy_mii_ioctl(phydev, ifr, cmd);
12631         }
12632
12633         switch (cmd) {
12634         case SIOCGMIIPHY:
12635                 data->phy_id = tp->phy_addr;
12636
12637                 /* fallthru */
12638         case SIOCGMIIREG: {
12639                 u32 mii_regval;
12640
12641                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12642                         break;                  /* We have no PHY */
12643
12644                 if (!netif_running(dev))
12645                         return -EAGAIN;
12646
12647                 spin_lock_bh(&tp->lock);
12648                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12649                 spin_unlock_bh(&tp->lock);
12650
12651                 data->val_out = mii_regval;
12652
12653                 return err;
12654         }
12655
12656         case SIOCSMIIREG:
12657                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12658                         break;                  /* We have no PHY */
12659
12660                 if (!netif_running(dev))
12661                         return -EAGAIN;
12662
12663                 spin_lock_bh(&tp->lock);
12664                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12665                 spin_unlock_bh(&tp->lock);
12666
12667                 return err;
12668
12669         default:
12670                 /* do nothing */
12671                 break;
12672         }
12673         return -EOPNOTSUPP;
12674 }
12675
12676 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12677 {
12678         struct tg3 *tp = netdev_priv(dev);
12679
12680         memcpy(ec, &tp->coal, sizeof(*ec));
12681         return 0;
12682 }
12683
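/* Validate and apply ethtool coalescing parameters, e.g. from
 * "ethtool -C ethX rx-usecs 20 rx-frames 5", which arrives here as
 * ec->rx_coalesce_usecs == 20 and ec->rx_max_coalesced_frames == 5.
 * On 5705-and-newer parts the IRQ tick and statistics-block knobs
 * do not exist, so their limits stay zero below and any nonzero
 * request for them is rejected with -EINVAL.
 */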
12684 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12685 {
12686         struct tg3 *tp = netdev_priv(dev);
12687         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12688         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12689
12690         if (!tg3_flag(tp, 5705_PLUS)) {
12691                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12692                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12693                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12694                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12695         }
12696
12697         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12698             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12699             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12700             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12701             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12702             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12703             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12704             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12705             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12706             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12707                 return -EINVAL;
12708
12709         /* No rx interrupts will be generated if both are zero */
12710         if ((ec->rx_coalesce_usecs == 0) &&
12711             (ec->rx_max_coalesced_frames == 0))
12712                 return -EINVAL;
12713
12714         /* No tx interrupts will be generated if both are zero */
12715         if ((ec->tx_coalesce_usecs == 0) &&
12716             (ec->tx_max_coalesced_frames == 0))
12717                 return -EINVAL;
12718
12719         /* Only copy relevant parameters, ignore all others. */
12720         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12721         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12722         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12723         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12724         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12725         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12726         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12727         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12728         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12729
12730         if (netif_running(dev)) {
12731                 tg3_full_lock(tp, 0);
12732                 __tg3_set_coalesce(tp, &tp->coal);
12733                 tg3_full_unlock(tp);
12734         }
12735         return 0;
12736 }
12737
12738 static const struct ethtool_ops tg3_ethtool_ops = {
12739         .get_settings           = tg3_get_settings,
12740         .set_settings           = tg3_set_settings,
12741         .get_drvinfo            = tg3_get_drvinfo,
12742         .get_regs_len           = tg3_get_regs_len,
12743         .get_regs               = tg3_get_regs,
12744         .get_wol                = tg3_get_wol,
12745         .set_wol                = tg3_set_wol,
12746         .get_msglevel           = tg3_get_msglevel,
12747         .set_msglevel           = tg3_set_msglevel,
12748         .nway_reset             = tg3_nway_reset,
12749         .get_link               = ethtool_op_get_link,
12750         .get_eeprom_len         = tg3_get_eeprom_len,
12751         .get_eeprom             = tg3_get_eeprom,
12752         .set_eeprom             = tg3_set_eeprom,
12753         .get_ringparam          = tg3_get_ringparam,
12754         .set_ringparam          = tg3_set_ringparam,
12755         .get_pauseparam         = tg3_get_pauseparam,
12756         .set_pauseparam         = tg3_set_pauseparam,
12757         .self_test              = tg3_self_test,
12758         .get_strings            = tg3_get_strings,
12759         .set_phys_id            = tg3_set_phys_id,
12760         .get_ethtool_stats      = tg3_get_ethtool_stats,
12761         .get_coalesce           = tg3_get_coalesce,
12762         .set_coalesce           = tg3_set_coalesce,
12763         .get_sset_count         = tg3_get_sset_count,
12764         .get_rxnfc              = tg3_get_rxnfc,
12765         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12766         .get_rxfh_indir         = tg3_get_rxfh_indir,
12767         .set_rxfh_indir         = tg3_set_rxfh_indir,
12768         .get_channels           = tg3_get_channels,
12769         .set_channels           = tg3_set_channels,
12770         .get_ts_info            = ethtool_op_get_ts_info,
12771 };
12772
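/* 64-bit stats callback.  Once the hardware statistics block is
 * gone (tp->hw_stats == NULL), fall back to the counters last
 * saved in net_stats_prev rather than returning nothing.
 */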
12773 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12774                                                 struct rtnl_link_stats64 *stats)
12775 {
12776         struct tg3 *tp = netdev_priv(dev);
12777
12778         spin_lock_bh(&tp->lock);
12779         if (!tp->hw_stats) {
12780                 spin_unlock_bh(&tp->lock);
12781                 return &tp->net_stats_prev;
12782         }
12783
12784         tg3_get_nstats(tp, stats);
12785         spin_unlock_bh(&tp->lock);
12786
12787         return stats;
12788 }
12789
12790 static void tg3_set_rx_mode(struct net_device *dev)
12791 {
12792         struct tg3 *tp = netdev_priv(dev);
12793
12794         if (!netif_running(dev))
12795                 return;
12796
12797         tg3_full_lock(tp, 0);
12798         __tg3_set_rx_mode(dev);
12799         tg3_full_unlock(tp);
12800 }
12801
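/* Fold an MTU change into the device flags: jumbo MTUs need the
 * jumbo RX ring, except on 5780-class parts, which keep the
 * standard ring but give up TSO instead (hence the
 * netdev_update_features() calls around the TSO_CAPABLE flip).
 */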
12802 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12803                                int new_mtu)
12804 {
12805         dev->mtu = new_mtu;
12806
12807         if (new_mtu > ETH_DATA_LEN) {
12808                 if (tg3_flag(tp, 5780_CLASS)) {
12809                         netdev_update_features(dev);
12810                         tg3_flag_clear(tp, TSO_CAPABLE);
12811                 } else {
12812                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12813                 }
12814         } else {
12815                 if (tg3_flag(tp, 5780_CLASS)) {
12816                         tg3_flag_set(tp, TSO_CAPABLE);
12817                         netdev_update_features(dev);
12818                 }
12819                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12820         }
12821 }
12822
12823 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12824 {
12825         struct tg3 *tp = netdev_priv(dev);
12826         int err, reset_phy = 0;
12827
12828         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12829                 return -EINVAL;
12830
12831         if (!netif_running(dev)) {
12832                 /* The new MTU will simply be picked up the next
12833                  * time the device is brought up.
12834                  */
12835                 tg3_set_mtu(dev, tp, new_mtu);
12836                 return 0;
12837         }
12838
12839         tg3_phy_stop(tp);
12840
12841         tg3_netif_stop(tp);
12842
12843         tg3_full_lock(tp, 1);
12844
12845         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12846
12847         tg3_set_mtu(dev, tp, new_mtu);
12848
12849         /* Reset the PHY, otherwise the read DMA engine will be left in a
12850          * mode that breaks all DMA read requests up into 256-byte chunks.
12851          */
12852         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12853                 reset_phy = 1;
12854
12855         err = tg3_restart_hw(tp, reset_phy);
12856
12857         if (!err)
12858                 tg3_netif_start(tp);
12859
12860         tg3_full_unlock(tp);
12861
12862         if (!err)
12863                 tg3_phy_start(tp);
12864
12865         return err;
12866 }
12867
12868 static const struct net_device_ops tg3_netdev_ops = {
12869         .ndo_open               = tg3_open,
12870         .ndo_stop               = tg3_close,
12871         .ndo_start_xmit         = tg3_start_xmit,
12872         .ndo_get_stats64        = tg3_get_stats64,
12873         .ndo_validate_addr      = eth_validate_addr,
12874         .ndo_set_rx_mode        = tg3_set_rx_mode,
12875         .ndo_set_mac_address    = tg3_set_mac_addr,
12876         .ndo_do_ioctl           = tg3_ioctl,
12877         .ndo_tx_timeout         = tg3_tx_timeout,
12878         .ndo_change_mtu         = tg3_change_mtu,
12879         .ndo_fix_features       = tg3_fix_features,
12880         .ndo_set_features       = tg3_set_features,
12881 #ifdef CONFIG_NET_POLL_CONTROLLER
12882         .ndo_poll_controller    = tg3_poll_controller,
12883 #endif
12884 };
12885
12886 static void tg3_get_eeprom_size(struct tg3 *tp)
12887 {
12888         u32 cursize, val, magic;
12889
12890         tp->nvram_size = EEPROM_CHIP_SIZE;
12891
12892         if (tg3_nvram_read(tp, 0, &magic) != 0)
12893                 return;
12894
12895         if ((magic != TG3_EEPROM_MAGIC) &&
12896             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12897             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12898                 return;
12899
12900         /*
12901          * Size the chip by reading offsets at increasing powers of two.
12902          * When we encounter our validation signature, we know the addressing
12903          * has wrapped around, and thus have our chip size.
12904          */
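        /* For example, on a 512-byte part the probe sequence below
         * reads offsets 0x10, 0x20, ... 0x100; the read at 0x200
         * wraps back to offset 0 and returns the magic signature,
         * so the loop exits with cursize == 0x200 (512 bytes).
         */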
12905         cursize = 0x10;
12906
12907         while (cursize < tp->nvram_size) {
12908                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12909                         return;
12910
12911                 if (val == magic)
12912                         break;
12913
12914                 cursize <<= 1;
12915         }
12916
12917         tp->nvram_size = cursize;
12918 }
12919
12920 static void tg3_get_nvram_size(struct tg3 *tp)
12921 {
12922         u32 val;
12923
12924         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12925                 return;
12926
12927         /* Selfboot format */
12928         if (val != TG3_EEPROM_MAGIC) {
12929                 tg3_get_eeprom_size(tp);
12930                 return;
12931         }
12932
12933         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12934                 if (val != 0) {
12935                         /* This is confusing.  We want to operate on the
12936                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12937                          * call will read from NVRAM and byteswap the data
12938                          * according to the byteswapping settings for all
12939                          * other register accesses.  This ensures the data we
12940                          * want will always reside in the lower 16-bits.
12941                          * However, the data in NVRAM is in LE format, which
12942                          * means the data from the NVRAM read will always be
12943                          * opposite the endianness of the CPU.  The 16-bit
12944                          * byteswap then brings the data to CPU endianness.
12945                          */
12946                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12947                         return;
12948                 }
12949         }
12950         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12951 }
12952
12953 static void tg3_get_nvram_info(struct tg3 *tp)
12954 {
12955         u32 nvcfg1;
12956
12957         nvcfg1 = tr32(NVRAM_CFG1);
12958         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12959                 tg3_flag_set(tp, FLASH);
12960         } else {
12961                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12962                 tw32(NVRAM_CFG1, nvcfg1);
12963         }
12964
12965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12966             tg3_flag(tp, 5780_CLASS)) {
12967                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12968                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12969                         tp->nvram_jedecnum = JEDEC_ATMEL;
12970                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12971                         tg3_flag_set(tp, NVRAM_BUFFERED);
12972                         break;
12973                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12974                         tp->nvram_jedecnum = JEDEC_ATMEL;
12975                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12976                         break;
12977                 case FLASH_VENDOR_ATMEL_EEPROM:
12978                         tp->nvram_jedecnum = JEDEC_ATMEL;
12979                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12980                         tg3_flag_set(tp, NVRAM_BUFFERED);
12981                         break;
12982                 case FLASH_VENDOR_ST:
12983                         tp->nvram_jedecnum = JEDEC_ST;
12984                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12985                         tg3_flag_set(tp, NVRAM_BUFFERED);
12986                         break;
12987                 case FLASH_VENDOR_SAIFUN:
12988                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12989                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12990                         break;
12991                 case FLASH_VENDOR_SST_SMALL:
12992                 case FLASH_VENDOR_SST_LARGE:
12993                         tp->nvram_jedecnum = JEDEC_SST;
12994                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12995                         break;
12996                 }
12997         } else {
12998                 tp->nvram_jedecnum = JEDEC_ATMEL;
12999                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13000                 tg3_flag_set(tp, NVRAM_BUFFERED);
13001         }
13002 }
13003
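/* Decode the flash page size advertised in NVRAM_CFG1.  The odd
 * 264- and 528-byte sizes match Atmel DataFlash parts, whose pages
 * carry 8 or 16 spare bytes on top of a 256- or 512-byte payload.
 */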
13004 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13005 {
13006         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13007         case FLASH_5752PAGE_SIZE_256:
13008                 tp->nvram_pagesize = 256;
13009                 break;
13010         case FLASH_5752PAGE_SIZE_512:
13011                 tp->nvram_pagesize = 512;
13012                 break;
13013         case FLASH_5752PAGE_SIZE_1K:
13014                 tp->nvram_pagesize = 1024;
13015                 break;
13016         case FLASH_5752PAGE_SIZE_2K:
13017                 tp->nvram_pagesize = 2048;
13018                 break;
13019         case FLASH_5752PAGE_SIZE_4K:
13020                 tp->nvram_pagesize = 4096;
13021                 break;
13022         case FLASH_5752PAGE_SIZE_264:
13023                 tp->nvram_pagesize = 264;
13024                 break;
13025         case FLASH_5752PAGE_SIZE_528:
13026                 tp->nvram_pagesize = 528;
13027                 break;
13028         }
13029 }
13030
13031 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13032 {
13033         u32 nvcfg1;
13034
13035         nvcfg1 = tr32(NVRAM_CFG1);
13036
13037         /* NVRAM protection for TPM */
13038         if (nvcfg1 & (1 << 27))
13039                 tg3_flag_set(tp, PROTECTED_NVRAM);
13040
13041         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13042         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13043         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
13044                 tp->nvram_jedecnum = JEDEC_ATMEL;
13045                 tg3_flag_set(tp, NVRAM_BUFFERED);
13046                 break;
13047         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13048                 tp->nvram_jedecnum = JEDEC_ATMEL;
13049                 tg3_flag_set(tp, NVRAM_BUFFERED);
13050                 tg3_flag_set(tp, FLASH);
13051                 break;
13052         case FLASH_5752VENDOR_ST_M45PE10:
13053         case FLASH_5752VENDOR_ST_M45PE20:
13054         case FLASH_5752VENDOR_ST_M45PE40:
13055                 tp->nvram_jedecnum = JEDEC_ST;
13056                 tg3_flag_set(tp, NVRAM_BUFFERED);
13057                 tg3_flag_set(tp, FLASH);
13058                 break;
13059         }
13060
13061         if (tg3_flag(tp, FLASH)) {
13062                 tg3_nvram_get_pagesize(tp, nvcfg1);
13063         } else {
13064                 /* For EEPROM, set the pagesize to the maximum EEPROM size */
13065                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13066
13067                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13068                 tw32(NVRAM_CFG1, nvcfg1);
13069         }
13070 }
13071
13072 static void tg3_get_5755_nvram_info(struct tg3 *tp)
13073 {
13074         u32 nvcfg1, protect = 0;
13075
13076         nvcfg1 = tr32(NVRAM_CFG1);
13077
13078         /* NVRAM protection for TPM */
13079         if (nvcfg1 & (1 << 27)) {
13080                 tg3_flag_set(tp, PROTECTED_NVRAM);
13081                 protect = 1;
13082         }
13083
13084         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13085         switch (nvcfg1) {
13086         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13087         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13088         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13089         case FLASH_5755VENDOR_ATMEL_FLASH_5:
13090                 tp->nvram_jedecnum = JEDEC_ATMEL;
13091                 tg3_flag_set(tp, NVRAM_BUFFERED);
13092                 tg3_flag_set(tp, FLASH);
13093                 tp->nvram_pagesize = 264;
13094                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
13095                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
13096                         tp->nvram_size = (protect ? 0x3e200 :
13097                                           TG3_NVRAM_SIZE_512KB);
13098                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
13099                         tp->nvram_size = (protect ? 0x1f200 :
13100                                           TG3_NVRAM_SIZE_256KB);
13101                 else
13102                         tp->nvram_size = (protect ? 0x1f200 :
13103                                           TG3_NVRAM_SIZE_128KB);
13104                 break;
13105         case FLASH_5752VENDOR_ST_M45PE10:
13106         case FLASH_5752VENDOR_ST_M45PE20:
13107         case FLASH_5752VENDOR_ST_M45PE40:
13108                 tp->nvram_jedecnum = JEDEC_ST;
13109                 tg3_flag_set(tp, NVRAM_BUFFERED);
13110                 tg3_flag_set(tp, FLASH);
13111                 tp->nvram_pagesize = 256;
13112                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
13113                         tp->nvram_size = (protect ?
13114                                           TG3_NVRAM_SIZE_64KB :
13115                                           TG3_NVRAM_SIZE_128KB);
13116                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
13117                         tp->nvram_size = (protect ?
13118                                           TG3_NVRAM_SIZE_64KB :
13119                                           TG3_NVRAM_SIZE_256KB);
13120                 else
13121                         tp->nvram_size = (protect ?
13122                                           TG3_NVRAM_SIZE_128KB :
13123                                           TG3_NVRAM_SIZE_512KB);
13124                 break;
13125         }
13126 }
13127
13128 static void tg3_get_5787_nvram_info(struct tg3 *tp)
13129 {
13130         u32 nvcfg1;
13131
13132         nvcfg1 = tr32(NVRAM_CFG1);
13133
13134         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13135         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
13136         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13137         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
13138         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13139                 tp->nvram_jedecnum = JEDEC_ATMEL;
13140                 tg3_flag_set(tp, NVRAM_BUFFERED);
13141                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13142
13143                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13144                 tw32(NVRAM_CFG1, nvcfg1);
13145                 break;
13146         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13147         case FLASH_5755VENDOR_ATMEL_FLASH_1:
13148         case FLASH_5755VENDOR_ATMEL_FLASH_2:
13149         case FLASH_5755VENDOR_ATMEL_FLASH_3:
13150                 tp->nvram_jedecnum = JEDEC_ATMEL;
13151                 tg3_flag_set(tp, NVRAM_BUFFERED);
13152                 tg3_flag_set(tp, FLASH);
13153                 tp->nvram_pagesize = 264;
13154                 break;
13155         case FLASH_5752VENDOR_ST_M45PE10:
13156         case FLASH_5752VENDOR_ST_M45PE20:
13157         case FLASH_5752VENDOR_ST_M45PE40:
13158                 tp->nvram_jedecnum = JEDEC_ST;
13159                 tg3_flag_set(tp, NVRAM_BUFFERED);
13160                 tg3_flag_set(tp, FLASH);
13161                 tp->nvram_pagesize = 256;
13162                 break;
13163         }
13164 }
13165
13166 static void tg3_get_5761_nvram_info(struct tg3 *tp)
13167 {
13168         u32 nvcfg1, protect = 0;
13169
13170         nvcfg1 = tr32(NVRAM_CFG1);
13171
13172         /* NVRAM protection for TPM */
13173         if (nvcfg1 & (1 << 27)) {
13174                 tg3_flag_set(tp, PROTECTED_NVRAM);
13175                 protect = 1;
13176         }
13177
13178         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
13179         switch (nvcfg1) {
13180         case FLASH_5761VENDOR_ATMEL_ADB021D:
13181         case FLASH_5761VENDOR_ATMEL_ADB041D:
13182         case FLASH_5761VENDOR_ATMEL_ADB081D:
13183         case FLASH_5761VENDOR_ATMEL_ADB161D:
13184         case FLASH_5761VENDOR_ATMEL_MDB021D:
13185         case FLASH_5761VENDOR_ATMEL_MDB041D:
13186         case FLASH_5761VENDOR_ATMEL_MDB081D:
13187         case FLASH_5761VENDOR_ATMEL_MDB161D:
13188                 tp->nvram_jedecnum = JEDEC_ATMEL;
13189                 tg3_flag_set(tp, NVRAM_BUFFERED);
13190                 tg3_flag_set(tp, FLASH);
13191                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13192                 tp->nvram_pagesize = 256;
13193                 break;
13194         case FLASH_5761VENDOR_ST_A_M45PE20:
13195         case FLASH_5761VENDOR_ST_A_M45PE40:
13196         case FLASH_5761VENDOR_ST_A_M45PE80:
13197         case FLASH_5761VENDOR_ST_A_M45PE16:
13198         case FLASH_5761VENDOR_ST_M_M45PE20:
13199         case FLASH_5761VENDOR_ST_M_M45PE40:
13200         case FLASH_5761VENDOR_ST_M_M45PE80:
13201         case FLASH_5761VENDOR_ST_M_M45PE16:
13202                 tp->nvram_jedecnum = JEDEC_ST;
13203                 tg3_flag_set(tp, NVRAM_BUFFERED);
13204                 tg3_flag_set(tp, FLASH);
13205                 tp->nvram_pagesize = 256;
13206                 break;
13207         }
13208
13209         if (protect) {
13210                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
13211         } else {
13212                 switch (nvcfg1) {
13213                 case FLASH_5761VENDOR_ATMEL_ADB161D:
13214                 case FLASH_5761VENDOR_ATMEL_MDB161D:
13215                 case FLASH_5761VENDOR_ST_A_M45PE16:
13216                 case FLASH_5761VENDOR_ST_M_M45PE16:
13217                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
13218                         break;
13219                 case FLASH_5761VENDOR_ATMEL_ADB081D:
13220                 case FLASH_5761VENDOR_ATMEL_MDB081D:
13221                 case FLASH_5761VENDOR_ST_A_M45PE80:
13222                 case FLASH_5761VENDOR_ST_M_M45PE80:
13223                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13224                         break;
13225                 case FLASH_5761VENDOR_ATMEL_ADB041D:
13226                 case FLASH_5761VENDOR_ATMEL_MDB041D:
13227                 case FLASH_5761VENDOR_ST_A_M45PE40:
13228                 case FLASH_5761VENDOR_ST_M_M45PE40:
13229                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13230                         break;
13231                 case FLASH_5761VENDOR_ATMEL_ADB021D:
13232                 case FLASH_5761VENDOR_ATMEL_MDB021D:
13233                 case FLASH_5761VENDOR_ST_A_M45PE20:
13234                 case FLASH_5761VENDOR_ST_M_M45PE20:
13235                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13236                         break;
13237                 }
13238         }
13239 }
13240
13241 static void tg3_get_5906_nvram_info(struct tg3 *tp)
13242 {
13243         tp->nvram_jedecnum = JEDEC_ATMEL;
13244         tg3_flag_set(tp, NVRAM_BUFFERED);
13245         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13246 }
13247
13248 static void tg3_get_57780_nvram_info(struct tg3 *tp)
13249 {
13250         u32 nvcfg1;
13251
13252         nvcfg1 = tr32(NVRAM_CFG1);
13253
13254         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13255         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
13256         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
13257                 tp->nvram_jedecnum = JEDEC_ATMEL;
13258                 tg3_flag_set(tp, NVRAM_BUFFERED);
13259                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13260
13261                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13262                 tw32(NVRAM_CFG1, nvcfg1);
13263                 return;
13264         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13265         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13266         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13267         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13268         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13269         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13270         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13271                 tp->nvram_jedecnum = JEDEC_ATMEL;
13272                 tg3_flag_set(tp, NVRAM_BUFFERED);
13273                 tg3_flag_set(tp, FLASH);
13274
13275                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13276                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
13277                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
13278                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
13279                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13280                         break;
13281                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
13282                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
13283                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13284                         break;
13285                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
13286                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
13287                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13288                         break;
13289                 }
13290                 break;
13291         case FLASH_5752VENDOR_ST_M45PE10:
13292         case FLASH_5752VENDOR_ST_M45PE20:
13293         case FLASH_5752VENDOR_ST_M45PE40:
13294                 tp->nvram_jedecnum = JEDEC_ST;
13295                 tg3_flag_set(tp, NVRAM_BUFFERED);
13296                 tg3_flag_set(tp, FLASH);
13297
13298                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13299                 case FLASH_5752VENDOR_ST_M45PE10:
13300                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13301                         break;
13302                 case FLASH_5752VENDOR_ST_M45PE20:
13303                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13304                         break;
13305                 case FLASH_5752VENDOR_ST_M45PE40:
13306                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13307                         break;
13308                 }
13309                 break;
13310         default:
13311                 tg3_flag_set(tp, NO_NVRAM);
13312                 return;
13313         }
13314
13315         tg3_nvram_get_pagesize(tp, nvcfg1);
13316         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13317                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13318 }
13319
13321 static void tg3_get_5717_nvram_info(struct tg3 *tp)
13322 {
13323         u32 nvcfg1;
13324
13325         nvcfg1 = tr32(NVRAM_CFG1);
13326
13327         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13328         case FLASH_5717VENDOR_ATMEL_EEPROM:
13329         case FLASH_5717VENDOR_MICRO_EEPROM:
13330                 tp->nvram_jedecnum = JEDEC_ATMEL;
13331                 tg3_flag_set(tp, NVRAM_BUFFERED);
13332                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13333
13334                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13335                 tw32(NVRAM_CFG1, nvcfg1);
13336                 return;
13337         case FLASH_5717VENDOR_ATMEL_MDB011D:
13338         case FLASH_5717VENDOR_ATMEL_ADB011B:
13339         case FLASH_5717VENDOR_ATMEL_ADB011D:
13340         case FLASH_5717VENDOR_ATMEL_MDB021D:
13341         case FLASH_5717VENDOR_ATMEL_ADB021B:
13342         case FLASH_5717VENDOR_ATMEL_ADB021D:
13343         case FLASH_5717VENDOR_ATMEL_45USPT:
13344                 tp->nvram_jedecnum = JEDEC_ATMEL;
13345                 tg3_flag_set(tp, NVRAM_BUFFERED);
13346                 tg3_flag_set(tp, FLASH);
13347
13348                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13349                 case FLASH_5717VENDOR_ATMEL_MDB021D:
13350                         /* Size is detected later via tg3_get_nvram_size() */
13351                         break;
13352                 case FLASH_5717VENDOR_ATMEL_ADB021B:
13353                 case FLASH_5717VENDOR_ATMEL_ADB021D:
13354                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13355                         break;
13356                 default:
13357                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13358                         break;
13359                 }
13360                 break;
13361         case FLASH_5717VENDOR_ST_M_M25PE10:
13362         case FLASH_5717VENDOR_ST_A_M25PE10:
13363         case FLASH_5717VENDOR_ST_M_M45PE10:
13364         case FLASH_5717VENDOR_ST_A_M45PE10:
13365         case FLASH_5717VENDOR_ST_M_M25PE20:
13366         case FLASH_5717VENDOR_ST_A_M25PE20:
13367         case FLASH_5717VENDOR_ST_M_M45PE20:
13368         case FLASH_5717VENDOR_ST_A_M45PE20:
13369         case FLASH_5717VENDOR_ST_25USPT:
13370         case FLASH_5717VENDOR_ST_45USPT:
13371                 tp->nvram_jedecnum = JEDEC_ST;
13372                 tg3_flag_set(tp, NVRAM_BUFFERED);
13373                 tg3_flag_set(tp, FLASH);
13374
13375                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13376                 case FLASH_5717VENDOR_ST_M_M25PE20:
13377                 case FLASH_5717VENDOR_ST_M_M45PE20:
13378                         /* Size is detected later via tg3_get_nvram_size() */
13379                         break;
13380                 case FLASH_5717VENDOR_ST_A_M25PE20:
13381                 case FLASH_5717VENDOR_ST_A_M45PE20:
13382                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13383                         break;
13384                 default:
13385                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13386                         break;
13387                 }
13388                 break;
13389         default:
13390                 tg3_flag_set(tp, NO_NVRAM);
13391                 return;
13392         }
13393
13394         tg3_nvram_get_pagesize(tp, nvcfg1);
13395         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13396                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13397 }
13398
13399 static void tg3_get_5720_nvram_info(struct tg3 *tp)
13400 {
13401         u32 nvcfg1, nvmpinstrp;
13402
13403         nvcfg1 = tr32(NVRAM_CFG1);
13404         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
13405
13406         switch (nvmpinstrp) {
13407         case FLASH_5720_EEPROM_HD:
13408         case FLASH_5720_EEPROM_LD:
13409                 tp->nvram_jedecnum = JEDEC_ATMEL;
13410                 tg3_flag_set(tp, NVRAM_BUFFERED);
13411
13412                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13413                 tw32(NVRAM_CFG1, nvcfg1);
13414                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
13415                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13416                 else
13417                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
13418                 return;
13419         case FLASH_5720VENDOR_M_ATMEL_DB011D:
13420         case FLASH_5720VENDOR_A_ATMEL_DB011B:
13421         case FLASH_5720VENDOR_A_ATMEL_DB011D:
13422         case FLASH_5720VENDOR_M_ATMEL_DB021D:
13423         case FLASH_5720VENDOR_A_ATMEL_DB021B:
13424         case FLASH_5720VENDOR_A_ATMEL_DB021D:
13425         case FLASH_5720VENDOR_M_ATMEL_DB041D:
13426         case FLASH_5720VENDOR_A_ATMEL_DB041B:
13427         case FLASH_5720VENDOR_A_ATMEL_DB041D:
13428         case FLASH_5720VENDOR_M_ATMEL_DB081D:
13429         case FLASH_5720VENDOR_A_ATMEL_DB081D:
13430         case FLASH_5720VENDOR_ATMEL_45USPT:
13431                 tp->nvram_jedecnum = JEDEC_ATMEL;
13432                 tg3_flag_set(tp, NVRAM_BUFFERED);
13433                 tg3_flag_set(tp, FLASH);
13434
13435                 switch (nvmpinstrp) {
13436                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
13437                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
13438                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
13439                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13440                         break;
13441                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
13442                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
13443                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
13444                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13445                         break;
13446                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
13447                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
13448                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13449                         break;
13450                 default:
13451                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13452                         break;
13453                 }
13454                 break;
13455         case FLASH_5720VENDOR_M_ST_M25PE10:
13456         case FLASH_5720VENDOR_M_ST_M45PE10:
13457         case FLASH_5720VENDOR_A_ST_M25PE10:
13458         case FLASH_5720VENDOR_A_ST_M45PE10:
13459         case FLASH_5720VENDOR_M_ST_M25PE20:
13460         case FLASH_5720VENDOR_M_ST_M45PE20:
13461         case FLASH_5720VENDOR_A_ST_M25PE20:
13462         case FLASH_5720VENDOR_A_ST_M45PE20:
13463         case FLASH_5720VENDOR_M_ST_M25PE40:
13464         case FLASH_5720VENDOR_M_ST_M45PE40:
13465         case FLASH_5720VENDOR_A_ST_M25PE40:
13466         case FLASH_5720VENDOR_A_ST_M45PE40:
13467         case FLASH_5720VENDOR_M_ST_M25PE80:
13468         case FLASH_5720VENDOR_M_ST_M45PE80:
13469         case FLASH_5720VENDOR_A_ST_M25PE80:
13470         case FLASH_5720VENDOR_A_ST_M45PE80:
13471         case FLASH_5720VENDOR_ST_25USPT:
13472         case FLASH_5720VENDOR_ST_45USPT:
13473                 tp->nvram_jedecnum = JEDEC_ST;
13474                 tg3_flag_set(tp, NVRAM_BUFFERED);
13475                 tg3_flag_set(tp, FLASH);
13476
13477                 switch (nvmpinstrp) {
13478                 case FLASH_5720VENDOR_M_ST_M25PE20:
13479                 case FLASH_5720VENDOR_M_ST_M45PE20:
13480                 case FLASH_5720VENDOR_A_ST_M25PE20:
13481                 case FLASH_5720VENDOR_A_ST_M45PE20:
13482                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
13483                         break;
13484                 case FLASH_5720VENDOR_M_ST_M25PE40:
13485                 case FLASH_5720VENDOR_M_ST_M45PE40:
13486                 case FLASH_5720VENDOR_A_ST_M25PE40:
13487                 case FLASH_5720VENDOR_A_ST_M45PE40:
13488                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13489                         break;
13490                 case FLASH_5720VENDOR_M_ST_M25PE80:
13491                 case FLASH_5720VENDOR_M_ST_M45PE80:
13492                 case FLASH_5720VENDOR_A_ST_M25PE80:
13493                 case FLASH_5720VENDOR_A_ST_M45PE80:
13494                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
13495                         break;
13496                 default:
13497                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
13498                         break;
13499                 }
13500                 break;
13501         default:
13502                 tg3_flag_set(tp, NO_NVRAM);
13503                 return;
13504         }
13505
13506         tg3_nvram_get_pagesize(tp, nvcfg1);
13507         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
13508                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
13509 }
13510
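/* One-time NVRAM discovery: reset the serial-EEPROM state machine,
 * enable NVRAM access, then hand off to the ASIC-specific decoder
 * for the chip at hand.  Any decoder that leaves tp->nvram_size at
 * zero gets the size probed afterwards by tg3_get_nvram_size().
 */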
13511 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
13512 static void tg3_nvram_init(struct tg3 *tp)
13513 {
13514         tw32_f(GRC_EEPROM_ADDR,
13515              (EEPROM_ADDR_FSM_RESET |
13516               (EEPROM_DEFAULT_CLOCK_PERIOD <<
13517                EEPROM_ADDR_CLKPERD_SHIFT)));
13518
13519         msleep(1);
13520
13521         /* Enable serial-EEPROM (seeprom) accesses. */
13522         tw32_f(GRC_LOCAL_CTRL,
13523              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
13524         udelay(100);
13525
13526         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13527             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
13528                 tg3_flag_set(tp, NVRAM);
13529
13530                 if (tg3_nvram_lock(tp)) {
13531                         netdev_warn(tp->dev,
13532                                     "Cannot get nvram lock, %s failed\n",
13533                                     __func__);
13534                         return;
13535                 }
13536                 tg3_enable_nvram_access(tp);
13537
13538                 tp->nvram_size = 0;
13539
13540                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13541                         tg3_get_5752_nvram_info(tp);
13542                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
13543                         tg3_get_5755_nvram_info(tp);
13544                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13545                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13546                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13547                         tg3_get_5787_nvram_info(tp);
13548                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
13549                         tg3_get_5761_nvram_info(tp);
13550                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13551                         tg3_get_5906_nvram_info(tp);
13552                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13553                          tg3_flag(tp, 57765_CLASS))
13554                         tg3_get_57780_nvram_info(tp);
13555                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13556                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13557                         tg3_get_5717_nvram_info(tp);
13558                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13559                         tg3_get_5720_nvram_info(tp);
13560                 else
13561                         tg3_get_nvram_info(tp);
13562
13563                 if (tp->nvram_size == 0)
13564                         tg3_get_nvram_size(tp);
13565
13566                 tg3_disable_nvram_access(tp);
13567                 tg3_nvram_unlock(tp);
13568
13569         } else {
13570                 tg3_flag_clear(tp, NVRAM);
13571                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13572
13573                 tg3_get_eeprom_size(tp);
13574         }
13575 }
13576
13577 struct subsys_tbl_ent {
13578         u16 subsys_vendor, subsys_devid;
13579         u32 phy_id;
13580 };
13581
13582 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
13583         /* Broadcom boards. */
13584         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13585           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13586         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13587           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13588         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13589           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13590         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13591           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13592         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13593           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13594         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13595           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13596         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13597           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13598         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13599           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13600         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13601           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13602         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13603           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13604         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13605           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13606
13607         /* 3com boards. */
13608         { TG3PCI_SUBVENDOR_ID_3COM,
13609           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13610         { TG3PCI_SUBVENDOR_ID_3COM,
13611           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13612         { TG3PCI_SUBVENDOR_ID_3COM,
13613           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13614         { TG3PCI_SUBVENDOR_ID_3COM,
13615           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13616         { TG3PCI_SUBVENDOR_ID_3COM,
13617           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13618
13619         /* DELL boards. */
13620         { TG3PCI_SUBVENDOR_ID_DELL,
13621           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13622         { TG3PCI_SUBVENDOR_ID_DELL,
13623           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13624         { TG3PCI_SUBVENDOR_ID_DELL,
13625           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13626         { TG3PCI_SUBVENDOR_ID_DELL,
13627           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13628
13629         /* Compaq boards. */
13630         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13631           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13632         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13633           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13634         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13635           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13636         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13637           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13638         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13639           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13640
13641         /* IBM boards. */
13642         { TG3PCI_SUBVENDOR_ID_IBM,
13643           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13644 };
13645
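/* Match the PCI subsystem vendor/device pair against the static
 * table above; returns NULL for boards we do not recognize.
 */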
13646 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
13647 {
13648         int i;
13649
13650         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13651                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13652                      tp->pdev->subsystem_vendor) &&
13653                     (subsys_id_to_phy_id[i].subsys_devid ==
13654                      tp->pdev->subsystem_device))
13655                         return &subsys_id_to_phy_id[i];
13656         }
13657         return NULL;
13658 }
13659
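/* Pull the manufacturing defaults (PHY ID, LED mode, WOL/ASF/APE
 * capabilities, serdes vs. copper) out of the NIC SRAM shadow of
 * the EEPROM.  The 5906 is handled up front via the VCPU config
 * shadow register instead.
 */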
13660 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13661 {
13662         u32 val;
13663
13664         tp->phy_id = TG3_PHY_ID_INVALID;
13665         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13666
13667         /* Assume an onboard device and WOL capability by default. */
13668         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13669         tg3_flag_set(tp, WOL_CAP);
13670
13671         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13672                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13673                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13674                         tg3_flag_set(tp, IS_NIC);
13675                 }
13676                 val = tr32(VCPU_CFGSHDW);
13677                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13678                         tg3_flag_set(tp, ASPM_WORKAROUND);
13679                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13680                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13681                         tg3_flag_set(tp, WOL_ENABLE);
13682                         device_set_wakeup_enable(&tp->pdev->dev, true);
13683                 }
13684                 goto done;
13685         }
13686
13687         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13688         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13689                 u32 nic_cfg, led_cfg;
13690                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13691                 int eeprom_phy_serdes = 0;
13692
13693                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13694                 tp->nic_sram_data_cfg = nic_cfg;
13695
13696                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13697                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13698                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13699                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13700                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13701                     (ver > 0) && (ver < 0x100))
13702                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13703
13704                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13705                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13706
13707                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13708                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13709                         eeprom_phy_serdes = 1;
13710
13711                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13712                 if (nic_phy_id != 0) {
13713                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13714                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13715
13716                         eeprom_phy_id  = (id1 >> 16) << 10;
13717                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13718                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13719                 } else
13720                         eeprom_phy_id = 0;
13721
13722                 tp->phy_id = eeprom_phy_id;
13723                 if (eeprom_phy_serdes) {
13724                         if (!tg3_flag(tp, 5705_PLUS))
13725                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13726                         else
13727                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13728                 }
13729
13730                 if (tg3_flag(tp, 5750_PLUS))
13731                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13732                                     SHASTA_EXT_LED_MODE_MASK);
13733                 else
13734                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13735
13736                 switch (led_cfg) {
13737                 default:
13738                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13739                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13740                         break;
13741
13742                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13743                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13744                         break;
13745
13746                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13747                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13748
13749                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
13750                          * as happens with some older 5700/5701 bootcode.
13751                          */
13752                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13753                             ASIC_REV_5700 ||
13754                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13755                             ASIC_REV_5701)
13756                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13757
13758                         break;
13759
13760                 case SHASTA_EXT_LED_SHARED:
13761                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13762                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13763                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13764                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13765                                                  LED_CTRL_MODE_PHY_2);
13766                         break;
13767
13768                 case SHASTA_EXT_LED_MAC:
13769                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13770                         break;
13771
13772                 case SHASTA_EXT_LED_COMBO:
13773                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13774                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13775                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13776                                                  LED_CTRL_MODE_PHY_2);
13777                         break;
13778
13779                 }
13780
13781                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13782                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13783                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13784                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13785
13786                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13787                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13788
13789                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13790                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13791                         if ((tp->pdev->subsystem_vendor ==
13792                              PCI_VENDOR_ID_ARIMA) &&
13793                             (tp->pdev->subsystem_device == 0x205a ||
13794                              tp->pdev->subsystem_device == 0x2063))
13795                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13796                 } else {
13797                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13798                         tg3_flag_set(tp, IS_NIC);
13799                 }
13800
13801                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13802                         tg3_flag_set(tp, ENABLE_ASF);
13803                         if (tg3_flag(tp, 5750_PLUS))
13804                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13805                 }
13806
13807                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13808                     tg3_flag(tp, 5750_PLUS))
13809                         tg3_flag_set(tp, ENABLE_APE);
13810
13811                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13812                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13813                         tg3_flag_clear(tp, WOL_CAP);
13814
13815                 if (tg3_flag(tp, WOL_CAP) &&
13816                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13817                         tg3_flag_set(tp, WOL_ENABLE);
13818                         device_set_wakeup_enable(&tp->pdev->dev, true);
13819                 }
13820
13821                 if (cfg2 & (1 << 17))
13822                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13823
13824                 /* Serdes signal pre-emphasis in register 0x590 is set
13825                  * by the bootcode if bit 18 is set. */
13826                 if (cfg2 & (1 << 18))
13827                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13828
13829                 if ((tg3_flag(tp, 57765_PLUS) ||
13830                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13831                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13832                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13833                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13834
13835                 if (tg3_flag(tp, PCI_EXPRESS) &&
13836                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13837                     !tg3_flag(tp, 57765_PLUS)) {
13838                         u32 cfg3;
13839
13840                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13841                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13842                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13843                 }
13844
13845                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13846                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13847                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13848                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13849                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13850                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13851         }
13852 done:
13853         if (tg3_flag(tp, WOL_CAP))
13854                 device_set_wakeup_enable(&tp->pdev->dev,
13855                                          tg3_flag(tp, WOL_ENABLE));
13856         else
13857                 device_set_wakeup_capable(&tp->pdev->dev, false);
13858 }
13859
13860 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13861 {
13862         int i;
13863         u32 val;
13864
13865         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13866         tw32(OTP_CTRL, cmd);
13867
13868         /* Wait for up to 1 ms for command to execute. */
13869         for (i = 0; i < 100; i++) {
13870                 val = tr32(OTP_STATUS);
13871                 if (val & OTP_STATUS_CMD_DONE)
13872                         break;
13873                 udelay(10);
13874         }
13875
13876         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13877 }
13878
13879 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13880  * configuration is a 32-bit value that straddles the alignment boundary.
13881  * We do two 32-bit reads and then shift and merge the results.
13882  */
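/* Concretely, the merge below keeps the low half of the word read
 * at MAGIC1 as the upper 16 bits of the result and the high half
 * of the word read at MAGIC2 as the lower 16 bits:
 *
 *     ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16)
 */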
13883 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
13884 {
13885         u32 bhalf_otp, thalf_otp;
13886
13887         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13888
13889         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13890                 return 0;
13891
13892         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13893
13894         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13895                 return 0;
13896
13897         thalf_otp = tr32(OTP_READ_DATA);
13898
13899         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13900
13901         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13902                 return 0;
13903
13904         bhalf_otp = tr32(OTP_READ_DATA);
13905
13906         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13907 }
13908
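/* Seed link_config with "advertise everything" autoneg defaults:
 * gigabit unless the PHY is 10/100-only, plus the 10/100/TP modes
 * for copper, or FIBRE for any serdes flavor.
 */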
13909 static void tg3_phy_init_link_config(struct tg3 *tp)
13910 {
13911         u32 adv = ADVERTISED_Autoneg;
13912
13913         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13914                 adv |= ADVERTISED_1000baseT_Half |
13915                        ADVERTISED_1000baseT_Full;
13916
13917         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13918                 adv |= ADVERTISED_100baseT_Half |
13919                        ADVERTISED_100baseT_Full |
13920                        ADVERTISED_10baseT_Half |
13921                        ADVERTISED_10baseT_Full |
13922                        ADVERTISED_TP;
13923         else
13924                 adv |= ADVERTISED_FIBRE;
13925
13926         tp->link_config.advertising = adv;
13927         tp->link_config.speed = SPEED_UNKNOWN;
13928         tp->link_config.duplex = DUPLEX_UNKNOWN;
13929         tp->link_config.autoneg = AUTONEG_ENABLE;
13930         tp->link_config.active_speed = SPEED_UNKNOWN;
13931         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13932
13933         tp->old_link = -1;
13934 }
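/* Net effect of the above: copper PHYs advertise autoneg plus 10/100
 * (and 1000 unless the 10/100-only flag is set) over twisted pair,
 * while serdes devices advertise autoneg (and 1000) over fibre; speed
 * and duplex, both requested and active, start out unknown until a
 * link is negotiated.
 */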
13935
13936 static int tg3_phy_probe(struct tg3 *tp)
13937 {
13938         u32 hw_phy_id_1, hw_phy_id_2;
13939         u32 hw_phy_id, hw_phy_id_masked;
13940         int err;
13941
13942         /* flow control autonegotiation is default behavior */
13943         tg3_flag_set(tp, PAUSE_AUTONEG);
13944         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13945
13946         if (tg3_flag(tp, ENABLE_APE)) {
13947                 switch (tp->pci_fn) {
13948                 case 0:
13949                         tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
13950                         break;
13951                 case 1:
13952                         tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
13953                         break;
13954                 case 2:
13955                         tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
13956                         break;
13957                 case 3:
13958                         tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
13959                         break;
13960                 }
13961         }
13962
13963         if (tg3_flag(tp, USE_PHYLIB))
13964                 return tg3_phy_init(tp);
13965
13966         /* Reading the PHY ID register can conflict with ASF
13967          * firmware access to the PHY hardware.
13968          */
13969         err = 0;
13970         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13971                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13972         } else {
13973                 /* Now read the physical PHY_ID from the chip and verify
13974                  * that it is sane.  If it doesn't look good, we fall back
13975                  * to the PHY ID recorded in the eeprom area or, failing
13976                  * that, to the hard-coded subsys device table.
13977                  */
13978                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13979                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13980
13981                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13982                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13983                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
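                /* The 32-bit tg3 phy_id is thus laid out as:
                 *   bits 31:26  PHYSID2[15:10]  (upper OUI bits)
                 *   bits 25:10  PHYSID1[15:0]   (lower OUI bits)
                 *   bits  9:0   PHYSID2[9:0]    (model + revision)
                 * TG3_PHY_ID_MASK strips the low revision bits so the
                 * masked value can be matched against the TG3_PHY_ID_*
                 * constants regardless of silicon revision.
                 */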
13984
13985                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13986         }
13987
13988         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13989                 tp->phy_id = hw_phy_id;
13990                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13991                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13992                 else
13993                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13994         } else {
13995                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13996                         /* Do nothing, phy ID already set up in
13997                          * tg3_get_eeprom_hw_cfg().
13998                          */
13999                 } else {
14000                         struct subsys_tbl_ent *p;
14001
14002                         /* No eeprom signature?  Try the hardcoded
14003                          * subsys device table.
14004                          */
14005                         p = tg3_lookup_by_subsys(tp);
14006                         if (!p)
14007                                 return -ENODEV;
14008
14009                         tp->phy_id = p->phy_id;
14010                         if (!tp->phy_id ||
14011                             tp->phy_id == TG3_PHY_ID_BCM8002)
14012                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14013                 }
14014         }
14015
14016         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14017             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14018              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
14019              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
14020               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
14021              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
14022               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
14023                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
14024
14025         tg3_phy_init_link_config(tp);
14026
14027         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14028             !tg3_flag(tp, ENABLE_APE) &&
14029             !tg3_flag(tp, ENABLE_ASF)) {
14030                 u32 bmsr, dummy;
14031
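                /* The MII BMSR link-status bit is latched low, which is
                 * why it is read twice below: the first read clears any
                 * stale latched value, the second reports the current
                 * link state.
                 */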
14032                 tg3_readphy(tp, MII_BMSR, &bmsr);
14033                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
14034                     (bmsr & BMSR_LSTATUS))
14035                         goto skip_phy_reset;
14036
14037                 err = tg3_phy_reset(tp);
14038                 if (err)
14039                         return err;
14040
14041                 tg3_phy_set_wirespeed(tp);
14042
14043                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
14044                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
14045                                             tp->link_config.flowctrl);
14046
14047                         tg3_writephy(tp, MII_BMCR,
14048                                      BMCR_ANENABLE | BMCR_ANRESTART);
14049                 }
14050         }
14051
14052 skip_phy_reset:
14053         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
14054                 err = tg3_init_5401phy_dsp(tp);
14055                 if (err)
14056                         return err;
14057
14058                 err = tg3_init_5401phy_dsp(tp);
14059         }
14060
14061         return err;
14062 }
14063
14064 static void tg3_read_vpd(struct tg3 *tp)
14065 {
14066         u8 *vpd_data;
14067         unsigned int block_end, rosize, len;
14068         u32 vpdlen;
14069         int j, i = 0;
14070
14071         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
14072         if (!vpd_data)
14073                 goto out_no_vpd;
14074
14075         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
14076         if (i < 0)
14077                 goto out_not_found;
14078
14079         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
14080         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
14081         i += PCI_VPD_LRDT_TAG_SIZE;
14082
14083         if (block_end > vpdlen)
14084                 goto out_not_found;
14085
14086         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14087                                       PCI_VPD_RO_KEYWORD_MFR_ID);
14088         if (j > 0) {
14089                 len = pci_vpd_info_field_size(&vpd_data[j]);
14090
14091                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14092                 if (j + len > block_end || len != 4 ||
14093                     memcmp(&vpd_data[j], "1028", 4))
14094                         goto partno;
14095
14096                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14097                                               PCI_VPD_RO_KEYWORD_VENDOR0);
14098                 if (j < 0)
14099                         goto partno;
14100
14101                 len = pci_vpd_info_field_size(&vpd_data[j]);
14102
14103                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
14104                 if (j + len > block_end)
14105                         goto partno;
14106
14107                 memcpy(tp->fw_ver, &vpd_data[j], len);
14108                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
14109         }
14110
14111 partno:
14112         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
14113                                       PCI_VPD_RO_KEYWORD_PARTNO);
14114         if (i < 0)
14115                 goto out_not_found;
14116
14117         len = pci_vpd_info_field_size(&vpd_data[i]);
14118
14119         i += PCI_VPD_INFO_FLD_HDR_SIZE;
14120         if (len > TG3_BPN_SIZE ||
14121             (len + i) > vpdlen)
14122                 goto out_not_found;
14123
14124         memcpy(tp->board_part_number, &vpd_data[i], len);
14125
14126 out_not_found:
14127         kfree(vpd_data);
14128         if (tp->board_part_number[0])
14129                 return;
14130
14131 out_no_vpd:
14132         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14133                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14134                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
14135                         strcpy(tp->board_part_number, "BCM5717");
14136                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
14137                         strcpy(tp->board_part_number, "BCM5718");
14138                 else
14139                         goto nomatch;
14140         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
14141                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
14142                         strcpy(tp->board_part_number, "BCM57780");
14143                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
14144                         strcpy(tp->board_part_number, "BCM57760");
14145                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
14146                         strcpy(tp->board_part_number, "BCM57790");
14147                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
14148                         strcpy(tp->board_part_number, "BCM57788");
14149                 else
14150                         goto nomatch;
14151         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
14152                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
14153                         strcpy(tp->board_part_number, "BCM57761");
14154                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
14155                         strcpy(tp->board_part_number, "BCM57765");
14156                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
14157                         strcpy(tp->board_part_number, "BCM57781");
14158                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
14159                         strcpy(tp->board_part_number, "BCM57785");
14160                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
14161                         strcpy(tp->board_part_number, "BCM57791");
14162                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
14163                         strcpy(tp->board_part_number, "BCM57795");
14164                 else
14165                         goto nomatch;
14166         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
14167                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
14168                         strcpy(tp->board_part_number, "BCM57762");
14169                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
14170                         strcpy(tp->board_part_number, "BCM57766");
14171                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
14172                         strcpy(tp->board_part_number, "BCM57782");
14173                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14174                         strcpy(tp->board_part_number, "BCM57786");
14175                 else
14176                         goto nomatch;
14177         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14178                 strcpy(tp->board_part_number, "BCM95906");
14179         } else {
14180 nomatch:
14181                 strcpy(tp->board_part_number, "none");
14182         }
14183 }
14184
14185 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
14186 {
14187         u32 val;
14188
14189         if (tg3_nvram_read(tp, offset, &val) ||
14190             (val & 0xfc000000) != 0x0c000000 ||
14191             tg3_nvram_read(tp, offset + 4, &val) ||
14192             val != 0)
14193                 return 0;
14194
14195         return 1;
14196 }
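/* The signature accepted above is: a first word whose top six bits
 * match the 0x0c000000 magic ((val & 0xfc000000) == 0x0c000000) and a
 * zero second word.  tg3_read_bc_ver() below applies the same test
 * inline to decide between the "new" and "old" bootcode version
 * formats.
 */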
14197
14198 static void tg3_read_bc_ver(struct tg3 *tp)
14199 {
14200         u32 val, offset, start, ver_offset;
14201         int i, dst_off;
14202         bool newver = false;
14203
14204         if (tg3_nvram_read(tp, 0xc, &offset) ||
14205             tg3_nvram_read(tp, 0x4, &start))
14206                 return;
14207
14208         offset = tg3_nvram_logical_addr(tp, offset);
14209
14210         if (tg3_nvram_read(tp, offset, &val))
14211                 return;
14212
14213         if ((val & 0xfc000000) == 0x0c000000) {
14214                 if (tg3_nvram_read(tp, offset + 4, &val))
14215                         return;
14216
14217                 if (val == 0)
14218                         newver = true;
14219         }
14220
14221         dst_off = strlen(tp->fw_ver);
14222
14223         if (newver) {
14224                 if (TG3_VER_SIZE - dst_off < 16 ||
14225                     tg3_nvram_read(tp, offset + 8, &ver_offset))
14226                         return;
14227
14228                 offset = offset + ver_offset - start;
14229                 for (i = 0; i < 16; i += 4) {
14230                         __be32 v;
14231                         if (tg3_nvram_read_be32(tp, offset + i, &v))
14232                                 return;
14233
14234                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
14235                 }
14236         } else {
14237                 u32 major, minor;
14238
14239                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
14240                         return;
14241
14242                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
14243                         TG3_NVM_BCVER_MAJSFT;
14244                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
14245                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
14246                          "v%d.%02d", major, minor);
14247         }
14248 }
14249
14250 static void tg3_read_hwsb_ver(struct tg3 *tp)
14251 {
14252         u32 val, major, minor;
14253
14254         /* Use native endian representation */
14255         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
14256                 return;
14257
14258         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
14259                 TG3_NVM_HWSB_CFG1_MAJSFT;
14260         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
14261                 TG3_NVM_HWSB_CFG1_MINSFT;
14262
14263         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
14264 }
14265
14266 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
14267 {
14268         u32 offset, major, minor, build;
14269
14270         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
14271
14272         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
14273                 return;
14274
14275         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
14276         case TG3_EEPROM_SB_REVISION_0:
14277                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
14278                 break;
14279         case TG3_EEPROM_SB_REVISION_2:
14280                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
14281                 break;
14282         case TG3_EEPROM_SB_REVISION_3:
14283                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
14284                 break;
14285         case TG3_EEPROM_SB_REVISION_4:
14286                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
14287                 break;
14288         case TG3_EEPROM_SB_REVISION_5:
14289                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
14290                 break;
14291         case TG3_EEPROM_SB_REVISION_6:
14292                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
14293                 break;
14294         default:
14295                 return;
14296         }
14297
14298         if (tg3_nvram_read(tp, offset, &val))
14299                 return;
14300
14301         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
14302                 TG3_EEPROM_SB_EDH_BLD_SHFT;
14303         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
14304                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
14305         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
14306
14307         if (minor > 99 || build > 26)
14308                 return;
14309
14310         offset = strlen(tp->fw_ver);
14311         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
14312                  " v%d.%02d", major, minor);
14313
14314         if (build > 0) {
14315                 offset = strlen(tp->fw_ver);
14316                 if (offset < TG3_VER_SIZE - 1)
14317                         tp->fw_ver[offset] = 'a' + build - 1;
14318         }
14319 }
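/* Worked example for the encoding above: major = 1, minor = 25,
 * build = 2 appends " v1.25" and then the build letter 'b'
 * ('a' + 2 - 1), leaving tp->fw_ver ending in "sb v1.25b".  Builds
 * above 26 are rejected earlier since only 'a'..'z' can express them.
 */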
14320
14321 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
14322 {
14323         u32 val, offset, start;
14324         int i, vlen;
14325
14326         for (offset = TG3_NVM_DIR_START;
14327              offset < TG3_NVM_DIR_END;
14328              offset += TG3_NVM_DIRENT_SIZE) {
14329                 if (tg3_nvram_read(tp, offset, &val))
14330                         return;
14331
14332                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
14333                         break;
14334         }
14335
14336         if (offset == TG3_NVM_DIR_END)
14337                 return;
14338
14339         if (!tg3_flag(tp, 5705_PLUS))
14340                 start = 0x08000000;
14341         else if (tg3_nvram_read(tp, offset - 4, &start))
14342                 return;
14343
14344         if (tg3_nvram_read(tp, offset + 4, &offset) ||
14345             !tg3_fw_img_is_valid(tp, offset) ||
14346             tg3_nvram_read(tp, offset + 8, &val))
14347                 return;
14348
14349         offset += val - start;
14350
14351         vlen = strlen(tp->fw_ver);
14352
14353         tp->fw_ver[vlen++] = ',';
14354         tp->fw_ver[vlen++] = ' ';
14355
14356         for (i = 0; i < 4; i++) {
14357                 __be32 v;
14358                 if (tg3_nvram_read_be32(tp, offset, &v))
14359                         return;
14360
14361                 offset += sizeof(v);
14362
14363                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
14364                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
14365                         break;
14366                 }
14367
14368                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
14369                 vlen += sizeof(v);
14370         }
14371 }
14372
14373 static void tg3_probe_ncsi(struct tg3 *tp)
14374 {
14375         u32 apedata;
14376
14377         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
14378         if (apedata != APE_SEG_SIG_MAGIC)
14379                 return;
14380
14381         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
14382         if (!(apedata & APE_FW_STATUS_READY))
14383                 return;
14384
14385         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
14386                 tg3_flag_set(tp, APE_HAS_NCSI);
14387 }
14388
14389 static void tg3_read_dash_ver(struct tg3 *tp)
14390 {
14391         int vlen;
14392         u32 apedata;
14393         char *fwtype;
14394
14395         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
14396
14397         if (tg3_flag(tp, APE_HAS_NCSI))
14398                 fwtype = "NCSI";
14399         else
14400                 fwtype = "DASH";
14401
14402         vlen = strlen(tp->fw_ver);
14403
14404         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
14405                  fwtype,
14406                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
14407                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
14408                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
14409                  (apedata & APE_FW_VERSION_BLDMSK));
14410 }
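/* The result appended here is "<existing version text> NCSI v1.2.3.4"
 * (or "DASH ..."), with the four fields unpacked from the single
 * TG3_APE_FW_VERSION word; the version numbers in this example are
 * illustrative only.
 */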
14411
14412 static void tg3_read_fw_ver(struct tg3 *tp)
14413 {
14414         u32 val;
14415         bool vpd_vers = false;
14416
14417         if (tp->fw_ver[0] != 0)
14418                 vpd_vers = true;
14419
14420         if (tg3_flag(tp, NO_NVRAM)) {
14421                 strcat(tp->fw_ver, "sb");
14422                 return;
14423         }
14424
14425         if (tg3_nvram_read(tp, 0, &val))
14426                 return;
14427
14428         if (val == TG3_EEPROM_MAGIC)
14429                 tg3_read_bc_ver(tp);
14430         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
14431                 tg3_read_sb_ver(tp, val);
14432         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
14433                 tg3_read_hwsb_ver(tp);
14434
14435         if (tg3_flag(tp, ENABLE_ASF)) {
14436                 if (tg3_flag(tp, ENABLE_APE)) {
14437                         tg3_probe_ncsi(tp);
14438                         if (!vpd_vers)
14439                                 tg3_read_dash_ver(tp);
14440                 } else if (!vpd_vers) {
14441                         tg3_read_mgmtfw_ver(tp);
14442                 }
14443         }
14444
14445         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
14446 }
14447
14448 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
14449 {
14450         if (tg3_flag(tp, LRG_PROD_RING_CAP))
14451                 return TG3_RX_RET_MAX_SIZE_5717;
14452         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
14453                 return TG3_RX_RET_MAX_SIZE_5700;
14454         else
14455                 return TG3_RX_RET_MAX_SIZE_5705;
14456 }
14457
14458 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
14459         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
14460         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
14461         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
14462         { },
14463 };
14464
14465 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
14466 {
14467         struct pci_dev *peer;
14468         unsigned int func, devnr = tp->pdev->devfn & ~7;
14469
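        /* devfn packs the PCI slot and function as (slot << 3) | func,
         * so masking off the low three bits yields function 0 of this
         * slot (e.g. devfn 0x09, slot 1 function 1, gives devnr 0x08)
         * and the loop below probes all eight sibling functions.
         */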
14470         for (func = 0; func < 8; func++) {
14471                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
14472                 if (peer && peer != tp->pdev)
14473                         break;
14474                 pci_dev_put(peer);
14475         }
14476         /* The 5704 can be configured in single-port mode; in that
14477          * case set peer to tp->pdev itself.
14478          */
14479         if (!peer) {
14480                 peer = tp->pdev;
14481                 return peer;
14482         }
14483
14484         /*
14485          * We don't need to keep the refcount elevated; there's no way
14486          * to remove one half of this device without removing the other.
14487          */
14488         pci_dev_put(peer);
14489
14490         return peer;
14491 }
14492
14493 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
14494 {
14495         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
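        /* For older chips this 16-bit value decomposes (roughly) as
         * asic_rev[15:12] | rev letter[11:8] | stepping[7:0]; e.g.
         * CHIPREV_ID_5752_A0 (0x6000) yields GET_ASIC_REV() == 0x6 and
         * GET_CHIP_REV() == 0x60.  Newer parts instead report the
         * ASIC_REV_USE_PROD_ID_REG sentinel and are resolved from a
         * product ID config register below.
         */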
14496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
14497                 u32 reg;
14498
14499                 /* All devices that use the alternate
14500                  * ASIC REV location have a CPMU.
14501                  */
14502                 tg3_flag_set(tp, CPMU_PRESENT);
14503
14504                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
14505                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
14506                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
14507                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
14508                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
14509                         reg = TG3PCI_GEN2_PRODID_ASICREV;
14510                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
14511                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
14512                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
14513                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
14514                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14515                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14516                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
14517                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
14518                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
14519                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
14520                         reg = TG3PCI_GEN15_PRODID_ASICREV;
14521                 else
14522                         reg = TG3PCI_PRODID_ASICREV;
14523
14524                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
14525         }
14526
14527         /* Wrong chip ID in 5752 A0. This code can be removed later
14528          * as A0 is not in production.
14529          */
14530         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
14531                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
14532
14533         if (tp->pci_chip_rev_id == CHIPREV_ID_5717_C0)
14534                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
14535
14536         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14537             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14538             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14539                 tg3_flag_set(tp, 5717_PLUS);
14540
14541         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
14542             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
14543                 tg3_flag_set(tp, 57765_CLASS);
14544
14545         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
14546                 tg3_flag_set(tp, 57765_PLUS);
14547
14548         /* Intentionally exclude ASIC_REV_5906 */
14549         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14550             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14551             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14552             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14553             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14554             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14555             tg3_flag(tp, 57765_PLUS))
14556                 tg3_flag_set(tp, 5755_PLUS);
14557
14558         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
14559             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14560                 tg3_flag_set(tp, 5780_CLASS);
14561
14562         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14563             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14564             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14565             tg3_flag(tp, 5755_PLUS) ||
14566             tg3_flag(tp, 5780_CLASS))
14567                 tg3_flag_set(tp, 5750_PLUS);
14568
14569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
14570             tg3_flag(tp, 5750_PLUS))
14571                 tg3_flag_set(tp, 5705_PLUS);
14572 }
14573
14574 static bool tg3_10_100_only_device(struct tg3 *tp,
14575                                    const struct pci_device_id *ent)
14576 {
14577         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
14578
14579         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14580             (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14581             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14582                 return true;
14583
14584         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
14585                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
14586                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
14587                                 return true;
14588                 } else {
14589                         return true;
14590                 }
14591         }
14592
14593         return false;
14594 }
14595
14596 static int tg3_get_invariants(struct tg3 *tp,
14597                                         const struct pci_device_id *ent)
14598 {
14599         u32 misc_ctrl_reg;
14600         u32 pci_state_reg, grc_misc_cfg;
14601         u32 val;
14602         u16 pci_cmd;
14603         int err;
14604
14605         /* Force memory write invalidate off.  If we leave it on,
14606          * then on 5700_BX chips we have to enable a workaround.
14607          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14608          * to match the cacheline size.  The Broadcom driver has this
14609          * workaround but turns MWI off all the time, so it never uses
14610          * it.  This suggests that the workaround is insufficient.
14611          */
14612         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14613         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14614         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14615
14616         /* Important! -- Make sure register accesses are byteswapped
14617          * correctly.  Also, for those chips that require it, make
14618          * sure that indirect register accesses are enabled before
14619          * the first operation.
14620          */
14621         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14622                               &misc_ctrl_reg);
14623         tp->misc_host_ctrl |= (misc_ctrl_reg &
14624                                MISC_HOST_CTRL_CHIPREV);
14625         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14626                                tp->misc_host_ctrl);
14627
14628         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14629
14630         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14631          * we need to disable memory and use configuration cycles
14632          * only to access all registers. The 5702/03 chips
14633          * can mistakenly decode the special cycles from the
14634          * ICH chipsets as memory write cycles, causing corruption
14635          * of register and memory space. Only certain ICH bridges
14636          * will drive special cycles with non-zero data during the
14637          * address phase which can fall within the 5703's address
14638          * range. This is not an ICH bug as the PCI spec allows
14639          * non-zero address during special cycles. However, only
14640          * these ICH bridges are known to drive non-zero addresses
14641          * during special cycles.
14642          *
14643          * Since special cycles do not cross PCI bridges, we only
14644          * enable this workaround if the 5703 is on the secondary
14645          * bus of these ICH bridges.
14646          */
14647         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14648             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14649                 static struct tg3_dev_id {
14650                         u32     vendor;
14651                         u32     device;
14652                         u32     rev;
14653                 } ich_chipsets[] = {
14654                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14655                           PCI_ANY_ID },
14656                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14657                           PCI_ANY_ID },
14658                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14659                           0xa },
14660                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14661                           PCI_ANY_ID },
14662                         { },
14663                 };
14664                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14665                 struct pci_dev *bridge = NULL;
14666
14667                 while (pci_id->vendor != 0) {
14668                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14669                                                 bridge);
14670                         if (!bridge) {
14671                                 pci_id++;
14672                                 continue;
14673                         }
14674                         if (pci_id->rev != PCI_ANY_ID) {
14675                                 if (bridge->revision > pci_id->rev)
14676                                         continue;
14677                         }
14678                         if (bridge->subordinate &&
14679                             (bridge->subordinate->number ==
14680                              tp->pdev->bus->number)) {
14681                                 tg3_flag_set(tp, ICH_WORKAROUND);
14682                                 pci_dev_put(bridge);
14683                                 break;
14684                         }
14685                 }
14686         }
14687
14688         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14689                 static struct tg3_dev_id {
14690                         u32     vendor;
14691                         u32     device;
14692                 } bridge_chipsets[] = {
14693                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14694                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14695                         { },
14696                 };
14697                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14698                 struct pci_dev *bridge = NULL;
14699
14700                 while (pci_id->vendor != 0) {
14701                         bridge = pci_get_device(pci_id->vendor,
14702                                                 pci_id->device,
14703                                                 bridge);
14704                         if (!bridge) {
14705                                 pci_id++;
14706                                 continue;
14707                         }
14708                         if (bridge->subordinate &&
14709                             (bridge->subordinate->number <=
14710                              tp->pdev->bus->number) &&
14711                             (bridge->subordinate->busn_res.end >=
14712                              tp->pdev->bus->number)) {
14713                                 tg3_flag_set(tp, 5701_DMA_BUG);
14714                                 pci_dev_put(bridge);
14715                                 break;
14716                         }
14717                 }
14718         }
14719
14720         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14721          * DMA addresses > 40-bit. This bridge may have other additional
14722          * 57xx devices behind it in some 4-port NIC designs for example.
14723          * Any tg3 device found behind the bridge will also need the 40-bit
14724          * DMA workaround.
14725          */
14726         if (tg3_flag(tp, 5780_CLASS)) {
14727                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14728                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14729         } else {
14730                 struct pci_dev *bridge = NULL;
14731
14732                 do {
14733                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14734                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14735                                                 bridge);
14736                         if (bridge && bridge->subordinate &&
14737                             (bridge->subordinate->number <=
14738                              tp->pdev->bus->number) &&
14739                             (bridge->subordinate->busn_res.end >=
14740                              tp->pdev->bus->number)) {
14741                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14742                                 pci_dev_put(bridge);
14743                                 break;
14744                         }
14745                 } while (bridge);
14746         }
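        /* Note on the loop idiom: pci_get_device() drops the reference
         * on the device passed in and takes one on the device returned,
         * so iterating this way leaks nothing; only the bridge we break
         * out with still holds a reference, released via pci_dev_put().
         */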
14747
14748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14749             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14750                 tp->pdev_peer = tg3_find_peer(tp);
14751
14752         /* Determine TSO capabilities */
14753         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14754                 ; /* Do nothing. HW bug. */
14755         else if (tg3_flag(tp, 57765_PLUS))
14756                 tg3_flag_set(tp, HW_TSO_3);
14757         else if (tg3_flag(tp, 5755_PLUS) ||
14758                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14759                 tg3_flag_set(tp, HW_TSO_2);
14760         else if (tg3_flag(tp, 5750_PLUS)) {
14761                 tg3_flag_set(tp, HW_TSO_1);
14762                 tg3_flag_set(tp, TSO_BUG);
14763                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14764                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14765                         tg3_flag_clear(tp, TSO_BUG);
14766         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14767                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14768                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14769                 tg3_flag_set(tp, TSO_BUG);
14770                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14771                         tp->fw_needed = FIRMWARE_TG3TSO5;
14772                 else
14773                         tp->fw_needed = FIRMWARE_TG3TSO;
14774         }
14775
14776         /* Selectively allow TSO based on operating conditions */
14777         if (tg3_flag(tp, HW_TSO_1) ||
14778             tg3_flag(tp, HW_TSO_2) ||
14779             tg3_flag(tp, HW_TSO_3) ||
14780             tp->fw_needed) {
14781                 /* For firmware TSO, assume ASF is disabled.
14782                  * We'll disable TSO later if we discover ASF
14783                  * is enabled in tg3_get_eeprom_hw_cfg().
14784                  */
14785                 tg3_flag_set(tp, TSO_CAPABLE);
14786         } else {
14787                 tg3_flag_clear(tp, TSO_CAPABLE);
14788                 tg3_flag_clear(tp, TSO_BUG);
14789                 tp->fw_needed = NULL;
14790         }
14791
14792         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14793                 tp->fw_needed = FIRMWARE_TG3;
14794
14795         tp->irq_max = 1;
14796
14797         if (tg3_flag(tp, 5750_PLUS)) {
14798                 tg3_flag_set(tp, SUPPORT_MSI);
14799                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14800                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14801                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14802                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14803                      tp->pdev_peer == tp->pdev))
14804                         tg3_flag_clear(tp, SUPPORT_MSI);
14805
14806                 if (tg3_flag(tp, 5755_PLUS) ||
14807                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14808                         tg3_flag_set(tp, 1SHOT_MSI);
14809                 }
14810
14811                 if (tg3_flag(tp, 57765_PLUS)) {
14812                         tg3_flag_set(tp, SUPPORT_MSIX);
14813                         tp->irq_max = TG3_IRQ_MAX_VECS;
14814                 }
14815         }
14816
14817         tp->txq_max = 1;
14818         tp->rxq_max = 1;
14819         if (tp->irq_max > 1) {
14820                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
14821                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
14822
14823                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14824                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14825                         tp->txq_max = tp->irq_max - 1;
14826         }
14827
14828         if (tg3_flag(tp, 5755_PLUS) ||
14829             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14830                 tg3_flag_set(tp, SHORT_DMA_BUG);
14831
14832         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14833                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14834
14835         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14836             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14837             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14838                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14839
14840         if (tg3_flag(tp, 57765_PLUS) &&
14841             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14842                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14843
14844         if (!tg3_flag(tp, 5705_PLUS) ||
14845             tg3_flag(tp, 5780_CLASS) ||
14846             tg3_flag(tp, USE_JUMBO_BDFLAG))
14847                 tg3_flag_set(tp, JUMBO_CAPABLE);
14848
14849         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14850                               &pci_state_reg);
14851
14852         if (pci_is_pcie(tp->pdev)) {
14853                 u16 lnkctl;
14854
14855                 tg3_flag_set(tp, PCI_EXPRESS);
14856
14857                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
14858                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14859                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14860                             ASIC_REV_5906) {
14861                                 tg3_flag_clear(tp, HW_TSO_2);
14862                                 tg3_flag_clear(tp, TSO_CAPABLE);
14863                         }
14864                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14865                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14866                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14867                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14868                                 tg3_flag_set(tp, CLKREQ_BUG);
14869                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14870                         tg3_flag_set(tp, L1PLLPD_EN);
14871                 }
14872         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14873                 /* BCM5785 devices are effectively PCIe devices, and should
14874                  * follow PCIe codepaths, but do not have a PCIe capabilities
14875                  * section.
14876                  */
14877                 tg3_flag_set(tp, PCI_EXPRESS);
14878         } else if (!tg3_flag(tp, 5705_PLUS) ||
14879                    tg3_flag(tp, 5780_CLASS)) {
14880                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14881                 if (!tp->pcix_cap) {
14882                         dev_err(&tp->pdev->dev,
14883                                 "Cannot find PCI-X capability, aborting\n");
14884                         return -EIO;
14885                 }
14886
14887                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14888                         tg3_flag_set(tp, PCIX_MODE);
14889         }
14890
14891         /* If we have an AMD 762 or VIA K8T800 chipset, write
14892          * reordering to the mailbox registers done by the host
14893          * controller can cause major trouble.  We read back from
14894          * every mailbox register write to force the writes to be
14895          * posted to the chip in order.
14896          */
14897         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14898             !tg3_flag(tp, PCI_EXPRESS))
14899                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
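        /* The flush performed for these chipsets is a read-back of the
         * same register; tg3_write_flush_reg32() boils down to:
         *
         *     writel(val, tp->regs + off);
         *     readl(tp->regs + off);      -- forces the posted write out
         */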
14900
14901         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14902                              &tp->pci_cacheline_sz);
14903         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14904                              &tp->pci_lat_timer);
14905         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14906             tp->pci_lat_timer < 64) {
14907                 tp->pci_lat_timer = 64;
14908                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14909                                       tp->pci_lat_timer);
14910         }
14911
14912         /* Important! -- It is critical that the PCI-X hw workaround
14913          * situation is decided before the first MMIO register access.
14914          */
14915         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14916                 /* 5700 BX chips need to have their TX producer index
14917                  * mailboxes written twice to workaround a bug.
14918                  */
14919                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14920
14921                 /* If we are in PCI-X mode, enable register write workaround.
14922                  *
14923                  * The workaround is to use indirect register accesses
14924                  * for all chip writes not to mailbox registers.
14925                  */
14926                 if (tg3_flag(tp, PCIX_MODE)) {
14927                         u32 pm_reg;
14928
14929                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14930
14931                         /* The chip can have its power management PCI config
14932                          * space registers clobbered due to this bug.
14933                          * So explicitly force the chip into D0 here.
14934                          */
14935                         pci_read_config_dword(tp->pdev,
14936                                               tp->pm_cap + PCI_PM_CTRL,
14937                                               &pm_reg);
14938                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14939                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14940                         pci_write_config_dword(tp->pdev,
14941                                                tp->pm_cap + PCI_PM_CTRL,
14942                                                pm_reg);
14943
14944                         /* Also, force SERR#/PERR# in PCI command. */
14945                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14946                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14947                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14948                 }
14949         }
14950
14951         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14952                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14953         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14954                 tg3_flag_set(tp, PCI_32BIT);
14955
14956         /* Chip-specific fixup from Broadcom driver */
14957         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14958             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14959                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14960                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14961         }
14962
14963         /* Default fast path register access methods */
14964         tp->read32 = tg3_read32;
14965         tp->write32 = tg3_write32;
14966         tp->read32_mbox = tg3_read32;
14967         tp->write32_mbox = tg3_write32;
14968         tp->write32_tx_mbox = tg3_write32;
14969         tp->write32_rx_mbox = tg3_write32;
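        /* Every register access in the driver goes through these hooks:
         * the tw32()/tr32() style macros expand to indirect calls such
         * as tp->write32(tp, reg, val).  The workarounds below therefore
         * only have to swap function pointers instead of scattering
         * chip checks through the fast path.
         */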
14970
14971         /* Various workaround register access methods */
14972         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14973                 tp->write32 = tg3_write_indirect_reg32;
14974         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14975                  (tg3_flag(tp, PCI_EXPRESS) &&
14976                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14977                 /*
14978                  * Back to back register writes can cause problems on these
14979                  * chips, the workaround is to read back all reg writes
14980                  * except those to mailbox regs.
14981                  *
14982                  * See tg3_write_indirect_reg32().
14983                  */
14984                 tp->write32 = tg3_write_flush_reg32;
14985         }
14986
14987         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14988                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14989                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14990                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14991         }
14992
14993         if (tg3_flag(tp, ICH_WORKAROUND)) {
14994                 tp->read32 = tg3_read_indirect_reg32;
14995                 tp->write32 = tg3_write_indirect_reg32;
14996                 tp->read32_mbox = tg3_read_indirect_mbox;
14997                 tp->write32_mbox = tg3_write_indirect_mbox;
14998                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14999                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
15000
15001                 iounmap(tp->regs);
15002                 tp->regs = NULL;
15003
15004                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15005                 pci_cmd &= ~PCI_COMMAND_MEMORY;
15006                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15007         }
15008         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15009                 tp->read32_mbox = tg3_read32_mbox_5906;
15010                 tp->write32_mbox = tg3_write32_mbox_5906;
15011                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
15012                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
15013         }
15014
15015         if (tp->write32 == tg3_write_indirect_reg32 ||
15016             (tg3_flag(tp, PCIX_MODE) &&
15017              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15018               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
15019                 tg3_flag_set(tp, SRAM_USE_CONFIG);
15020
15021         /* The memory arbiter has to be enabled in order for SRAM accesses
15022          * to succeed.  Normally on powerup the tg3 chip firmware will make
15023          * sure it is enabled, but other entities such as system netboot
15024          * code might disable it.
15025          */
15026         val = tr32(MEMARB_MODE);
15027         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
15028
15029         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
15030         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15031             tg3_flag(tp, 5780_CLASS)) {
15032                 if (tg3_flag(tp, PCIX_MODE)) {
15033                         pci_read_config_dword(tp->pdev,
15034                                               tp->pcix_cap + PCI_X_STATUS,
15035                                               &val);
15036                         tp->pci_fn = val & 0x7;
15037                 }
15038         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
15039                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15040                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15041                     NIC_SRAM_CPMUSTAT_SIG) {
15042                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
15043                         tp->pci_fn = tp->pci_fn ? 1 : 0;
15044                 }
15045         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
15046                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
15047                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
15048                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
15049                     NIC_SRAM_CPMUSTAT_SIG) {
15050                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
15051                                      TG3_CPMU_STATUS_FSHFT_5719;
15052                 }
15053         }
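        /* On the 5717 the CPMU status word carries what is effectively
         * a single "which port" indication, hence the ?: normalization
         * to 0 or 1 above; on the 5719/5720 it is a multi-bit function
         * number that is masked and shifted out instead.
         */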
15054
15055         /* Get eeprom hw config before calling tg3_set_power_state().
15056          * In particular, the TG3_FLAG_IS_NIC flag must be
15057          * determined before calling tg3_set_power_state() so that
15058          * we know whether or not to switch out of Vaux power.
15059          * When the flag is set, it means that GPIO1 is used for eeprom
15060          * write protect and also implies that it is a LOM where GPIOs
15061          * are not used to switch power.
15062          */
15063         tg3_get_eeprom_hw_cfg(tp);
15064
15065         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
15066                 tg3_flag_clear(tp, TSO_CAPABLE);
15067                 tg3_flag_clear(tp, TSO_BUG);
15068                 tp->fw_needed = NULL;
15069         }
15070
15071         if (tg3_flag(tp, ENABLE_APE)) {
15072                 /* Allow reads and writes to the
15073                  * APE register and memory space.
15074                  */
15075                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
15076                                  PCISTATE_ALLOW_APE_SHMEM_WR |
15077                                  PCISTATE_ALLOW_APE_PSPACE_WR;
15078                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
15079                                        pci_state_reg);
15080
15081                 tg3_ape_lock_init(tp);
15082         }
15083
15084         /* Set up tp->grc_local_ctrl before calling
15085          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
15086          * will bring 5700's external PHY out of reset.
15087          * It is also used as eeprom write protect on LOMs.
15088          */
15089         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
15090         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15091             tg3_flag(tp, EEPROM_WRITE_PROT))
15092                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
15093                                        GRC_LCLCTRL_GPIO_OUTPUT1);
15094         /* Unused GPIO3 must be driven as output on 5752 because there
15095          * are no pull-up resistors on unused GPIO pins.
15096          */
15097         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
15098                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
15099
15100         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15101             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
15102             tg3_flag(tp, 57765_CLASS))
15103                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15104
15105         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15106             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
15107                 /* Turn off the debug UART. */
15108                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
15109                 if (tg3_flag(tp, IS_NIC))
15110                         /* Keep VMain power. */
15111                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
15112                                               GRC_LCLCTRL_GPIO_OUTPUT0;
15113         }
15114
15115         /* Switch out of Vaux if it is a NIC */
15116         tg3_pwrsrc_switch_to_vmain(tp);
15117
15118         /* Derive initial jumbo mode from MTU assigned in
15119          * ether_setup() via the alloc_etherdev() call.
15120          */
15121         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
15122                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
15123
15124         /* Determine WakeOnLan speed to use. */
15125         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15126             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
15127             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
15128             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
15129                 tg3_flag_clear(tp, WOL_SPEED_100MB);
15130         } else {
15131                 tg3_flag_set(tp, WOL_SPEED_100MB);
15132         }
15133
15134         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15135                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
15136
15137         /* A few boards don't want the Ethernet@WireSpeed phy feature */
15138         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15139             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15140              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
15141              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
15142             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
15143             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15144                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
15145
15146         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
15147             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
15148                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
15149         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
15150                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
15151
15152         if (tg3_flag(tp, 5705_PLUS) &&
15153             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
15154             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
15155             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
15156             !tg3_flag(tp, 57765_PLUS)) {
15157                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
15158                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
15159                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
15160                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
15161                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
15162                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
15163                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
15164                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
15165                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
15166                 } else
15167                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
15168         }
15169
15170         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15171             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
15172                 tp->phy_otp = tg3_read_otp_phycfg(tp);
15173                 if (tp->phy_otp == 0)
15174                         tp->phy_otp = TG3_OTP_DEFAULT;
15175         }
15176
15177         if (tg3_flag(tp, CPMU_PRESENT))
15178                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
15179         else
15180                 tp->mi_mode = MAC_MI_MODE_BASE;
15181
15182         tp->coalesce_mode = 0;
15183         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
15184             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
15185                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
15186
15187         /* Set these bits to enable the statistics workaround. */
15188         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
15189             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
15190             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
15191                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
15192                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
15193         }
15194
15195         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15196             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15197                 tg3_flag_set(tp, USE_PHYLIB);
15198
15199         err = tg3_mdio_init(tp);
15200         if (err)
15201                 return err;
15202
15203         /* Initialize data/descriptor byte/word swapping. */
15204         val = tr32(GRC_MODE);
15205         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
15206                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
15207                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
15208                         GRC_MODE_B2HRX_ENABLE |
15209                         GRC_MODE_HTX2B_ENABLE |
15210                         GRC_MODE_HOST_STACKUP);
15211         else
15212                 val &= GRC_MODE_HOST_STACKUP;
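        /* Only swap/stackup bits that bootcode may already have set are
         * preserved from the live register; everything else comes from
         * the tp->grc_mode value computed at probe time.
         */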
15213
15214         tw32(GRC_MODE, val | tp->grc_mode);
15215
15216         tg3_switch_clocks(tp);
15217
15218         /* Clear this out for sanity. */
15219         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
15220
15221         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15222                               &pci_state_reg);
15223         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
15224             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
15225                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
15226
15227                 if (chiprevid == CHIPREV_ID_5701_A0 ||
15228                     chiprevid == CHIPREV_ID_5701_B0 ||
15229                     chiprevid == CHIPREV_ID_5701_B2 ||
15230                     chiprevid == CHIPREV_ID_5701_B5) {
15231                         void __iomem *sram_base;
15232
15233                         /* Write some dummy words into the SRAM status block
15234                          * area and see if they read back correctly.  If the
15235                          * read-back value is bad, force-enable the PCIX workaround.
15236                          */
15237                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
15238
15239                         writel(0x00000000, sram_base);
15240                         writel(0x00000000, sram_base + 4);
15241                         writel(0xffffffff, sram_base + 4);
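                        /* If the first word no longer reads back as zero,
                         * the back-to-back writes to sram_base + 4 were
                         * misdirected by the PCI-X target logic and the
                         * workaround is needed.
                         */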
15242                         if (readl(sram_base) != 0x00000000)
15243                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
15244                 }
15245         }
15246
15247         udelay(50);
15248         tg3_nvram_init(tp);
15249
15250         grc_misc_cfg = tr32(GRC_MISC_CFG);
15251         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15252
15253         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
15254             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
15255              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
15256                 tg3_flag_set(tp, IS_5788);
15257
15258         if (!tg3_flag(tp, IS_5788) &&
15259             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
15260                 tg3_flag_set(tp, TAGGED_STATUS);
15261         if (tg3_flag(tp, TAGGED_STATUS)) {
15262                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
15263                                       HOSTCC_MODE_CLRTICK_TXBD);
15264
15265                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
15266                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15267                                        tp->misc_host_ctrl);
15268         }
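        /* With tagged status the chip stamps each status block update with
         * a tag that the driver echoes back when acking an interrupt, so
         * an update racing with interrupt handling is not silently lost.
         */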
15269
15270         /* Preserve the APE MAC_MODE bits */
15271         if (tg3_flag(tp, ENABLE_APE))
15272                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
15273         else
15274                 tp->mac_mode = 0;
15275
15276         if (tg3_10_100_only_device(tp, ent))
15277                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
15278
15279         err = tg3_phy_probe(tp);
15280         if (err) {
15281                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
15282                 /* ...but do not return immediately; tear down MDIO and let err propagate from the end of this function. */
15283                 tg3_mdio_fini(tp);
15284         }
15285
15286         tg3_read_vpd(tp);
15287         tg3_read_fw_ver(tp);
15288
15289         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
15290                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15291         } else {
15292                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15293                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15294                 else
15295                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
15296         }
15297
15298         /* 5700 {AX,BX} chips have a broken status block link
15299          * change bit implementation, so we must use the
15300          * status register in those cases.
15301          */
15302         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
15303                 tg3_flag_set(tp, USE_LINKCHG_REG);
15304         else
15305                 tg3_flag_clear(tp, USE_LINKCHG_REG);
15306
15307         /* The led_ctrl is set during tg3_phy_probe; here we might
15308          * have to force the link status polling mechanism based
15309          * upon subsystem IDs.
15310          */
15311         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
15312             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15313             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
15314                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
15315                 tg3_flag_set(tp, USE_LINKCHG_REG);
15316         }
15317
15318         /* For all SERDES we poll the MAC status register. */
15319         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
15320                 tg3_flag_set(tp, POLL_SERDES);
15321         else
15322                 tg3_flag_clear(tp, POLL_SERDES);
15323
15324         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
15325         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
15326         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
15327             tg3_flag(tp, PCIX_MODE)) {
15328                 tp->rx_offset = NET_SKB_PAD;
15329 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
15330                 tp->rx_copy_thresh = ~(u16)0;
15331 #endif
15332         }
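        /* The 5701 in PCI-X mode reportedly cannot DMA into a 2-byte
         * shifted buffer, hence no NET_IP_ALIGN here; a copy threshold of
         * 0xffff then makes the driver copy every packet so the IP header
         * still ends up aligned on strict-alignment architectures.
         */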
15333
15334         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
15335         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
15336         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
15337
15338         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
15339
15340         /* Increment the rx prod index on the rx std ring by at most
15341          * 8 for these chips to work around hw errata.
15342          */
15343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
15344             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
15345             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
15346                 tp->rx_std_max_post = 8;
15347
15348         if (tg3_flag(tp, ASPM_WORKAROUND))
15349                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
15350                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
15351
15352         return err;
15353 }
15354
15355 #ifdef CONFIG_SPARC
15356 static int tg3_get_macaddr_sparc(struct tg3 *tp)
15357 {
15358         struct net_device *dev = tp->dev;
15359         struct pci_dev *pdev = tp->pdev;
15360         struct device_node *dp = pci_device_to_OF_node(pdev);
15361         const unsigned char *addr;
15362         int len;
15363
15364         addr = of_get_property(dp, "local-mac-address", &len);
15365         if (addr && len == 6) {
15366                 memcpy(dev->dev_addr, addr, 6);
15367                 memcpy(dev->perm_addr, dev->dev_addr, 6);
15368                 return 0;
15369         }
15370         return -ENODEV;
15371 }
15372
15373 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
15374 {
15375         struct net_device *dev = tp->dev;
15376
15377         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
15378         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
15379         return 0;
15380 }
15381 #endif
15382
15383 static int tg3_get_device_address(struct tg3 *tp)
15384 {
15385         struct net_device *dev = tp->dev;
15386         u32 hi, lo, mac_offset;
15387         int addr_ok = 0;
15388
15389 #ifdef CONFIG_SPARC
15390         if (!tg3_get_macaddr_sparc(tp))
15391                 return 0;
15392 #endif
15393
15394         mac_offset = 0x7c;
15395         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
15396             tg3_flag(tp, 5780_CLASS)) {
15397                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
15398                         mac_offset = 0xcc;
15399                 if (tg3_nvram_lock(tp))
15400                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
15401                 else
15402                         tg3_nvram_unlock(tp);
15403         } else if (tg3_flag(tp, 5717_PLUS)) {
15404                 if (tp->pci_fn & 1)
15405                         mac_offset = 0xcc;
15406                 if (tp->pci_fn > 1)
15407                         mac_offset += 0x18c;
15408         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
15409                 mac_offset = 0x10;
15410
15411         /* First try to get it from MAC address mailbox. */
15412         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
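        /* 0x484b is ASCII "HK", apparently the bootcode's signature that a
         * valid MAC address has been deposited in the mailbox.
         */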
15413         if ((hi >> 16) == 0x484b) {
15414                 dev->dev_addr[0] = (hi >>  8) & 0xff;
15415                 dev->dev_addr[1] = (hi >>  0) & 0xff;
15416
15417                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
15418                 dev->dev_addr[2] = (lo >> 24) & 0xff;
15419                 dev->dev_addr[3] = (lo >> 16) & 0xff;
15420                 dev->dev_addr[4] = (lo >>  8) & 0xff;
15421                 dev->dev_addr[5] = (lo >>  0) & 0xff;
15422
15423                 /* Some old bootcode may report a 0 MAC address in SRAM */
15424                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
15425         }
15426         if (!addr_ok) {
15427                 /* Next, try NVRAM. */
15428                 if (!tg3_flag(tp, NO_NVRAM) &&
15429                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
15430                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
15431                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
15432                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
15433                 }
15434                 /* Finally just fetch it out of the MAC control regs. */
15435                 else {
15436                         hi = tr32(MAC_ADDR_0_HIGH);
15437                         lo = tr32(MAC_ADDR_0_LOW);
15438
15439                         dev->dev_addr[5] = lo & 0xff;
15440                         dev->dev_addr[4] = (lo >> 8) & 0xff;
15441                         dev->dev_addr[3] = (lo >> 16) & 0xff;
15442                         dev->dev_addr[2] = (lo >> 24) & 0xff;
15443                         dev->dev_addr[1] = hi & 0xff;
15444                         dev->dev_addr[0] = (hi >> 8) & 0xff;
15445                 }
15446         }
15447
15448         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
15449 #ifdef CONFIG_SPARC
15450                 if (!tg3_get_default_macaddr_sparc(tp))
15451                         return 0;
15452 #endif
15453                 return -EINVAL;
15454         }
15455         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
15456         return 0;
15457 }
15458
15459 #define BOUNDARY_SINGLE_CACHELINE       1
15460 #define BOUNDARY_MULTI_CACHELINE        2
15461
15462 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
15463 {
15464         int cacheline_size;
15465         u8 byte;
15466         int goal;
15467
15468         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
15469         if (byte == 0)
15470                 cacheline_size = 1024;
15471         else
15472                 cacheline_size = (int) byte * 4;
15473
15474         /* On 5703 and later chips, the boundary bits have no
15475          * effect.
15476          */
15477         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15478             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
15479             !tg3_flag(tp, PCI_EXPRESS))
15480                 goto out;
15481
15482 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
15483         goal = BOUNDARY_MULTI_CACHELINE;
15484 #else
15485 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
15486         goal = BOUNDARY_SINGLE_CACHELINE;
15487 #else
15488         goal = 0;
15489 #endif
15490 #endif
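        /* goal == 0 means this architecture has no boundary preference; on
         * 57765+ parts that disables cache alignment entirely, and on
         * older parts the tuning below is simply skipped.
         */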
15491
15492         if (tg3_flag(tp, 57765_PLUS)) {
15493                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
15494                 goto out;
15495         }
15496
15497         if (!goal)
15498                 goto out;
15499
15500         /* PCI controllers on most RISC systems tend to disconnect
15501          * when a device tries to burst across a cache-line boundary.
15502          * Therefore, letting tg3 do so just wastes PCI bandwidth.
15503          *
15504          * Unfortunately, for PCI-E there are only limited
15505          * write-side controls for this, and thus for reads
15506          * we will still get the disconnects.  We'll also waste
15507          * these PCI cycles for both read and write for chips
15508          * other than the 5700 and 5701, which do not implement the
15509          * boundary bits.
15510          */
15511         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
15512                 switch (cacheline_size) {
15513                 case 16:
15514                 case 32:
15515                 case 64:
15516                 case 128:
15517                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15518                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
15519                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
15520                         } else {
15521                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15522                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15523                         }
15524                         break;
15525
15526                 case 256:
15527                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
15528                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
15529                         break;
15530
15531                 default:
15532                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
15533                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
15534                         break;
15535                 }
15536         } else if (tg3_flag(tp, PCI_EXPRESS)) {
15537                 switch (cacheline_size) {
15538                 case 16:
15539                 case 32:
15540                 case 64:
15541                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15542                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15543                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
15544                                 break;
15545                         }
15546                         /* fallthrough */
15547                 case 128:
15548                 default:
15549                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
15550                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
15551                         break;
15552                 }
15553         } else {
15554                 switch (cacheline_size) {
15555                 case 16:
15556                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15557                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
15558                                         DMA_RWCTRL_WRITE_BNDRY_16);
15559                                 break;
15560                         }
15561                         /* fallthrough */
15562                 case 32:
15563                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15564                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
15565                                         DMA_RWCTRL_WRITE_BNDRY_32);
15566                                 break;
15567                         }
15568                         /* fallthrough */
15569                 case 64:
15570                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15571                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
15572                                         DMA_RWCTRL_WRITE_BNDRY_64);
15573                                 break;
15574                         }
15575                         /* fallthrough */
15576                 case 128:
15577                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
15578                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
15579                                         DMA_RWCTRL_WRITE_BNDRY_128);
15580                                 break;
15581                         }
15582                         /* fallthrough */
15583                 case 256:
15584                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
15585                                 DMA_RWCTRL_WRITE_BNDRY_256);
15586                         break;
15587                 case 512:
15588                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
15589                                 DMA_RWCTRL_WRITE_BNDRY_512);
15590                         break;
15591                 case 1024:
15592                 default:
15593                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
15594                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15595                         break;
15596                 }
15597         }
15598
15599 out:
15600         return val;
15601 }
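/* Worked example: on a plain PCI bus (neither PCIX_MODE nor PCI_EXPRESS)
 * with a 64-byte cache line and goal == BOUNDARY_SINGLE_CACHELINE, the
 * last switch above ORs in DMA_RWCTRL_READ_BNDRY_64 |
 * DMA_RWCTRL_WRITE_BNDRY_64, stopping DMA bursts at 64-byte boundaries.
 */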
15602
15603 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
15604                            int size, int to_device)
15605 {
15606         struct tg3_internal_buffer_desc test_desc;
15607         u32 sram_dma_descs;
15608         int i, ret;
15609
15610         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15611
15612         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15613         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15614         tw32(RDMAC_STATUS, 0);
15615         tw32(WDMAC_STATUS, 0);
15616
15617         tw32(BUFMGR_MODE, 0);
15618         tw32(FTQ_RESET, 0);
15619
15620         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15621         test_desc.addr_lo = buf_dma & 0xffffffff;
15622         test_desc.nic_mbuf = 0x00002100;
15623         test_desc.len = size;
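        /* nic_mbuf 0x2100 appears to be the NIC SRAM offset the data lands
         * at; it matches the address read back by the (disabled)
         * verification block in tg3_test_dma() below.
         */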
15624
15625         /*
15626          * HP ZX1 was seeing test failures for 5701 cards running at 33 MHz
15627          * the *second* time the tg3 driver was loaded after an
15628          * initial scan.
15629          *
15630          * Broadcom tells me:
15631          *   ...the DMA engine is connected to the GRC block and a DMA
15632          *   reset may affect the GRC block in some unpredictable way...
15633          *   The behavior of resets to individual blocks has not been tested.
15634          *
15635          * Broadcom noted the GRC reset will also reset all sub-components.
15636          */
15637         if (to_device) {
15638                 test_desc.cqid_sqid = (13 << 8) | 2;
15639
15640                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15641                 udelay(40);
15642         } else {
15643                 test_desc.cqid_sqid = (16 << 8) | 7;
15644
15645                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15646                 udelay(40);
15647         }
15648         test_desc.flags = 0x00000005;
15649
15650         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15651                 u32 val;
15652
15653                 val = *(((u32 *)&test_desc) + i);
15654                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15655                                        sram_dma_descs + (i * sizeof(u32)));
15656                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15657         }
15658         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15659
15660         if (to_device)
15661                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15662         else
15663                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15664
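        /* Queueing the descriptor's SRAM address on the high-priority DMA
         * FIFO starts the transfer; completion is detected when the same
         * address shows up on the matching completion FIFO, polled below
         * for up to ~4 ms (40 iterations x 100 us).
         */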
15665         ret = -ENODEV;
15666         for (i = 0; i < 40; i++) {
15667                 u32 val;
15668
15669                 if (to_device)
15670                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15671                 else
15672                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15673                 if ((val & 0xffff) == sram_dma_descs) {
15674                         ret = 0;
15675                         break;
15676                 }
15677
15678                 udelay(100);
15679         }
15680
15681         return ret;
15682 }
15683
15684 #define TEST_BUFFER_SIZE        0x2000
15685
15686 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15687         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15688         { },
15689 };
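/* The Apple UniNorth bridge is known to expose the 5700/5701 write DMA bug
 * even though it passes the test in tg3_test_dma(), so it is forced to the
 * conservative 16-byte write boundary unconditionally.
 */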
15690
15691 static int tg3_test_dma(struct tg3 *tp)
15692 {
15693         dma_addr_t buf_dma;
15694         u32 *buf, saved_dma_rwctrl;
15695         int ret = 0;
15696
15697         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15698                                  &buf_dma, GFP_KERNEL);
15699         if (!buf) {
15700                 ret = -ENOMEM;
15701                 goto out_nofree;
15702         }
15703
15704         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15705                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
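        /* 0x7 and 0x6 are presumably the PCI bus command codes the DMA
         * engine issues (Memory Write and Memory Read, respectively).
         */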
15706
15707         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15708
15709         if (tg3_flag(tp, 57765_PLUS))
15710                 goto out;
15711
15712         if (tg3_flag(tp, PCI_EXPRESS)) {
15713                 /* DMA read watermark not used on PCIE */
15714                 tp->dma_rwctrl |= 0x00180000;
15715         } else if (!tg3_flag(tp, PCIX_MODE)) {
15716                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15717                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15718                         tp->dma_rwctrl |= 0x003f0000;
15719                 else
15720                         tp->dma_rwctrl |= 0x003f000f;
15721         } else {
15722                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15723                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15724                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15725                         u32 read_water = 0x7;
15726
15727                         /* If the 5704 is behind the EPB bridge, we can
15728                          * do the less restrictive ONE_DMA workaround for
15729                          * better performance.
15730                          */
15731                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15732                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15733                                 tp->dma_rwctrl |= 0x8000;
15734                         else if (ccval == 0x6 || ccval == 0x7)
15735                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15736
15737                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15738                                 read_water = 4;
15739                         /* Set bit 23 to enable PCIX hw bug fix */
15740                         tp->dma_rwctrl |=
15741                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15742                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15743                                 (1 << 23);
15744                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15745                         /* 5780 always in PCIX mode */
15746                         tp->dma_rwctrl |= 0x00144000;
15747                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15748                         /* 5714 always in PCIX mode */
15749                         tp->dma_rwctrl |= 0x00148000;
15750                 } else {
15751                         tp->dma_rwctrl |= 0x001b000f;
15752                 }
15753         }
15754
15755         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15756             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15757                 tp->dma_rwctrl &= 0xfffffff0;
15758
15759         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15760             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15761                 /* Remove this if it causes problems for some boards. */
15762                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15763
15764                 /* On 5700/5701 chips, we need to set this bit.
15765                  * Otherwise the chip will issue cacheline transactions
15766                  * to streamable DMA memory with not all the byte
15767                  * enables turned on.  This is an error on several
15768                  * RISC PCI controllers, in particular sparc64.
15769                  *
15770                  * On 5703/5704 chips, this bit has been reassigned
15771                  * a different meaning.  In particular, it is used
15772                  * on those chips to enable a PCI-X workaround.
15773                  */
15774                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15775         }
15776
15777         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15778
15779 #if 0
15780         /* Unneeded, already done by tg3_get_invariants.  */
15781         tg3_switch_clocks(tp);
15782 #endif
15783
15784         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15785             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15786                 goto out;
15787
15788         /* It is best to perform the DMA test with the maximum write burst size
15789          * to expose the 5700/5701 write DMA bug.
15790          */
15791         saved_dma_rwctrl = tp->dma_rwctrl;
15792         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15793         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15794
15795         while (1) {
15796                 u32 *p = buf, i;
15797
15798                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15799                         p[i] = i;
15800
15801                 /* Send the buffer to the chip. */
15802                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15803                 if (ret) {
15804                         dev_err(&tp->pdev->dev,
15805                                 "%s: Buffer write failed. err = %d\n",
15806                                 __func__, ret);
15807                         break;
15808                 }
15809
15810 #if 0
15811                 /* validate data reached card RAM correctly. */
15812                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15813                         u32 val;
15814                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15815                         if (le32_to_cpu(val) != p[i]) {
15816                                 dev_err(&tp->pdev->dev,
15817                                         "%s: Buffer corrupted on device! "
15818                                         "(%d != %d)\n", __func__, val, i);
15819                                 /* ret = -ENODEV here? */
15820                         }
15821                         p[i] = 0;
15822                 }
15823 #endif
15824                 /* Now read it back. */
15825                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15826                 if (ret) {
15827                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15828                                 "err = %d\n", __func__, ret);
15829                         break;
15830                 }
15831
15832                 /* Verify it. */
15833                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15834                         if (p[i] == i)
15835                                 continue;
15836
15837                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15838                             DMA_RWCTRL_WRITE_BNDRY_16) {
15839                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15840                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15841                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15842                                 break;
15843                         } else {
15844                                 dev_err(&tp->pdev->dev,
15845                                         "%s: Buffer corrupted on read back! "
15846                                         "(%d != %d)\n", __func__, p[i], i);
15847                                 ret = -ENODEV;
15848                                 goto out;
15849                         }
15850                 }
15851
15852                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15853                         /* Success. */
15854                         ret = 0;
15855                         break;
15856                 }
15857         }
15858         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15859             DMA_RWCTRL_WRITE_BNDRY_16) {
15860                 /* DMA test passed without adjusting DMA boundary;
15861                  * now look for chipsets that are known to expose the
15862                  * DMA bug without failing the test.
15863                  */
15864                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15865                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15866                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15867                 } else {
15868                         /* Safe to use the calculated DMA boundary. */
15869                         tp->dma_rwctrl = saved_dma_rwctrl;
15870                 }
15871
15872                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15873         }
15874
15875 out:
15876         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15877 out_nofree:
15878         return ret;
15879 }
15880
15881 static void tg3_init_bufmgr_config(struct tg3 *tp)
15882 {
15883         if (tg3_flag(tp, 57765_PLUS)) {
15884                 tp->bufmgr_config.mbuf_read_dma_low_water =
15885                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15886                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15887                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15888                 tp->bufmgr_config.mbuf_high_water =
15889                         DEFAULT_MB_HIGH_WATER_57765;
15890
15891                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15892                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15893                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15894                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15895                 tp->bufmgr_config.mbuf_high_water_jumbo =
15896                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15897         } else if (tg3_flag(tp, 5705_PLUS)) {
15898                 tp->bufmgr_config.mbuf_read_dma_low_water =
15899                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15900                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15901                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15902                 tp->bufmgr_config.mbuf_high_water =
15903                         DEFAULT_MB_HIGH_WATER_5705;
15904                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15905                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15906                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15907                         tp->bufmgr_config.mbuf_high_water =
15908                                 DEFAULT_MB_HIGH_WATER_5906;
15909                 }
15910
15911                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15912                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15913                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15914                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15915                 tp->bufmgr_config.mbuf_high_water_jumbo =
15916                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15917         } else {
15918                 tp->bufmgr_config.mbuf_read_dma_low_water =
15919                         DEFAULT_MB_RDMA_LOW_WATER;
15920                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15921                         DEFAULT_MB_MACRX_LOW_WATER;
15922                 tp->bufmgr_config.mbuf_high_water =
15923                         DEFAULT_MB_HIGH_WATER;
15924
15925                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15926                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15927                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15928                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15929                 tp->bufmgr_config.mbuf_high_water_jumbo =
15930                         DEFAULT_MB_HIGH_WATER_JUMBO;
15931         }
15932
15933         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15934         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15935 }
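/* A sketch of how these values are consumed (assuming the usual reset
 * flow): tg3_reset_hw() programs them into the BUFMGR_MB_* watermark
 * registers, switching to the jumbo variants when a jumbo ring is active.
 */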
15936
15937 static char *tg3_phy_string(struct tg3 *tp)
15938 {
15939         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15940         case TG3_PHY_ID_BCM5400:        return "5400";
15941         case TG3_PHY_ID_BCM5401:        return "5401";
15942         case TG3_PHY_ID_BCM5411:        return "5411";
15943         case TG3_PHY_ID_BCM5701:        return "5701";
15944         case TG3_PHY_ID_BCM5703:        return "5703";
15945         case TG3_PHY_ID_BCM5704:        return "5704";
15946         case TG3_PHY_ID_BCM5705:        return "5705";
15947         case TG3_PHY_ID_BCM5750:        return "5750";
15948         case TG3_PHY_ID_BCM5752:        return "5752";
15949         case TG3_PHY_ID_BCM5714:        return "5714";
15950         case TG3_PHY_ID_BCM5780:        return "5780";
15951         case TG3_PHY_ID_BCM5755:        return "5755";
15952         case TG3_PHY_ID_BCM5787:        return "5787";
15953         case TG3_PHY_ID_BCM5784:        return "5784";
15954         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15955         case TG3_PHY_ID_BCM5906:        return "5906";
15956         case TG3_PHY_ID_BCM5761:        return "5761";
15957         case TG3_PHY_ID_BCM5718C:       return "5718C";
15958         case TG3_PHY_ID_BCM5718S:       return "5718S";
15959         case TG3_PHY_ID_BCM57765:       return "57765";
15960         case TG3_PHY_ID_BCM5719C:       return "5719C";
15961         case TG3_PHY_ID_BCM5720C:       return "5720C";
15962         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15963         case 0:                 return "serdes";
15964         default:                return "unknown";
15965         }
15966 }
15967
15968 static char *tg3_bus_string(struct tg3 *tp, char *str)
15969 {
15970         if (tg3_flag(tp, PCI_EXPRESS)) {
15971                 strcpy(str, "PCI Express");
15972                 return str;
15973         } else if (tg3_flag(tp, PCIX_MODE)) {
15974                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15975
15976                 strcpy(str, "PCIX:");
15977
15978                 if ((clock_ctrl == 7) ||
15979                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15980                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15981                         strcat(str, "133MHz");
15982                 else if (clock_ctrl == 0)
15983                         strcat(str, "33MHz");
15984                 else if (clock_ctrl == 2)
15985                         strcat(str, "50MHz");
15986                 else if (clock_ctrl == 4)
15987                         strcat(str, "66MHz");
15988                 else if (clock_ctrl == 6)
15989                         strcat(str, "100MHz");
15990         } else {
15991                 strcpy(str, "PCI:");
15992                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15993                         strcat(str, "66MHz");
15994                 else
15995                         strcat(str, "33MHz");
15996         }
15997         if (tg3_flag(tp, PCI_32BIT))
15998                 strcat(str, ":32-bit");
15999         else
16000                 strcat(str, ":64-bit");
16001         return str;
16002 }
16003
16004 static void tg3_init_coal(struct tg3 *tp)
16005 {
16006         struct ethtool_coalesce *ec = &tp->coal;
16007
16008         memset(ec, 0, sizeof(*ec));
16009         ec->cmd = ETHTOOL_GCOALESCE;
16010         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
16011         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
16012         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
16013         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
16014         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
16015         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
16016         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
16017         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
16018         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
16019
16020         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
16021                                  HOSTCC_MODE_CLRTICK_TXBD)) {
16022                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
16023                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
16024                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
16025                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
16026         }
16027
16028         if (tg3_flag(tp, 5705_PLUS)) {
16029                 ec->rx_coalesce_usecs_irq = 0;
16030                 ec->tx_coalesce_usecs_irq = 0;
16031                 ec->stats_block_coalesce_usecs = 0;
16032         }
16033 }
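/* These defaults are what "ethtool -c" reports before any user tuning; the
 * irq and stats variants are zeroed on 5705_PLUS parts because those
 * coalescing knobs are apparently not implemented there.
 */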
16034
16035 static int tg3_init_one(struct pci_dev *pdev,
16036                                   const struct pci_device_id *ent)
16037 {
16038         struct net_device *dev;
16039         struct tg3 *tp;
16040         int i, err, pm_cap;
16041         u32 sndmbx, rcvmbx, intmbx;
16042         char str[40];
16043         u64 dma_mask, persist_dma_mask;
16044         netdev_features_t features = 0;
16045
16046         printk_once(KERN_INFO "%s\n", version);
16047
16048         err = pci_enable_device(pdev);
16049         if (err) {
16050                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
16051                 return err;
16052         }
16053
16054         err = pci_request_regions(pdev, DRV_MODULE_NAME);
16055         if (err) {
16056                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
16057                 goto err_out_disable_pdev;
16058         }
16059
16060         pci_set_master(pdev);
16061
16062         /* Find power-management capability. */
16063         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
16064         if (pm_cap == 0) {
16065                 dev_err(&pdev->dev,
16066                         "Cannot find Power Management capability, aborting\n");
16067                 err = -EIO;
16068                 goto err_out_free_res;
16069         }
16070
16071         err = pci_set_power_state(pdev, PCI_D0);
16072         if (err) {
16073                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
16074                 goto err_out_free_res;
16075         }
16076
16077         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
16078         if (!dev) {
16079                 err = -ENOMEM;
16080                 goto err_out_power_down;
16081         }
16082
16083         SET_NETDEV_DEV(dev, &pdev->dev);
16084
16085         tp = netdev_priv(dev);
16086         tp->pdev = pdev;
16087         tp->dev = dev;
16088         tp->pm_cap = pm_cap;
16089         tp->rx_mode = TG3_DEF_RX_MODE;
16090         tp->tx_mode = TG3_DEF_TX_MODE;
16091
16092         if (tg3_debug > 0)
16093                 tp->msg_enable = tg3_debug;
16094         else
16095                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
16096
16097         /* The word/byte swap controls here control register access byte
16098          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
16099          * setting below.
16100          */
16101         tp->misc_host_ctrl =
16102                 MISC_HOST_CTRL_MASK_PCI_INT |
16103                 MISC_HOST_CTRL_WORD_SWAP |
16104                 MISC_HOST_CTRL_INDIR_ACCESS |
16105                 MISC_HOST_CTRL_PCISTATE_RW;
16106
16107         /* The NONFRM (non-frame) byte/word swap controls take effect
16108          * on descriptor entries, i.e. anything that isn't packet data.
16109          *
16110          * The StrongARM chips on the board (one for tx, one for rx)
16111          * are running in big-endian mode.
16112          */
16113         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
16114                         GRC_MODE_WSWAP_NONFRM_DATA);
16115 #ifdef __BIG_ENDIAN
16116         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
16117 #endif
16118         spin_lock_init(&tp->lock);
16119         spin_lock_init(&tp->indirect_lock);
16120         INIT_WORK(&tp->reset_task, tg3_reset_task);
16121
16122         tp->regs = pci_ioremap_bar(pdev, BAR_0);
16123         if (!tp->regs) {
16124                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
16125                 err = -ENOMEM;
16126                 goto err_out_free_dev;
16127         }
16128
16129         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16130             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
16131             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
16132             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
16133             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16134             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16135             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16136             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16137             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
16138                 tg3_flag_set(tp, ENABLE_APE);
16139                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
16140                 if (!tp->aperegs) {
16141                         dev_err(&pdev->dev,
16142                                 "Cannot map APE registers, aborting\n");
16143                         err = -ENOMEM;
16144                         goto err_out_iounmap;
16145                 }
16146         }
16147
16148         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
16149         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
16150
16151         dev->ethtool_ops = &tg3_ethtool_ops;
16152         dev->watchdog_timeo = TG3_TX_TIMEOUT;
16153         dev->netdev_ops = &tg3_netdev_ops;
16154         dev->irq = pdev->irq;
16155
16156         err = tg3_get_invariants(tp, ent);
16157         if (err) {
16158                 dev_err(&pdev->dev,
16159                         "Problem fetching invariants of chip, aborting\n");
16160                 goto err_out_apeunmap;
16161         }
16162
16163         /* The EPB bridge inside 5714, 5715, and 5780 and any
16164          * device behind the EPB cannot support DMA addresses > 40-bit.
16165          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
16166          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
16167          * do DMA address check in tg3_start_xmit().
16168          */
16169         if (tg3_flag(tp, IS_5788))
16170                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
16171         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
16172                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
16173 #ifdef CONFIG_HIGHMEM
16174                 dma_mask = DMA_BIT_MASK(64);
16175 #endif
16176         } else
16177                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
16178
16179         /* Configure DMA attributes. */
16180         if (dma_mask > DMA_BIT_MASK(32)) {
16181                 err = pci_set_dma_mask(pdev, dma_mask);
16182                 if (!err) {
16183                         features |= NETIF_F_HIGHDMA;
16184                         err = pci_set_consistent_dma_mask(pdev,
16185                                                           persist_dma_mask);
16186                         if (err < 0) {
16187                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
16188                                         "DMA for consistent allocations\n");
16189                                 goto err_out_apeunmap;
16190                         }
16191                 }
16192         }
16193         if (err || dma_mask == DMA_BIT_MASK(32)) {
16194                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
16195                 if (err) {
16196                         dev_err(&pdev->dev,
16197                                 "No usable DMA configuration, aborting\n");
16198                         goto err_out_apeunmap;
16199                 }
16200         }
16201
16202         tg3_init_bufmgr_config(tp);
16203
16204         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
16205
16206         /* 5700 B0 chips do not support checksumming correctly due
16207          * to hardware bugs.
16208          */
16209         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
16210                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
16211
16212                 if (tg3_flag(tp, 5755_PLUS))
16213                         features |= NETIF_F_IPV6_CSUM;
16214         }
16215
16216         /* TSO is on by default on chips that support hardware TSO.
16217          * Firmware TSO on older chips gives lower performance, so it
16218          * is off by default, but can be enabled using ethtool.
16219          */
16220         if ((tg3_flag(tp, HW_TSO_1) ||
16221              tg3_flag(tp, HW_TSO_2) ||
16222              tg3_flag(tp, HW_TSO_3)) &&
16223             (features & NETIF_F_IP_CSUM))
16224                 features |= NETIF_F_TSO;
16225         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
16226                 if (features & NETIF_F_IPV6_CSUM)
16227                         features |= NETIF_F_TSO6;
16228                 if (tg3_flag(tp, HW_TSO_3) ||
16229                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
16230                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
16231                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
16232                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
16233                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
16234                         features |= NETIF_F_TSO_ECN;
16235         }
16236
16237         dev->features |= features;
16238         dev->vlan_features |= features;
16239
16240         /*
16241          * Add loopback capability only for a subset of devices that support
16242          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
16243          * loopback for the remaining devices.
16244          */
16245         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
16246             !tg3_flag(tp, CPMU_PRESENT))
16247                 /* Add the loopback capability */
16248                 features |= NETIF_F_LOOPBACK;
16249
16250         dev->hw_features |= features;
16251
16252         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
16253             !tg3_flag(tp, TSO_CAPABLE) &&
16254             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
16255                 tg3_flag_set(tp, MAX_RXPEND_64);
16256                 tp->rx_pending = 63;
16257         }
16258
16259         err = tg3_get_device_address(tp);
16260         if (err) {
16261                 dev_err(&pdev->dev,
16262                         "Could not obtain valid ethernet address, aborting\n");
16263                 goto err_out_apeunmap;
16264         }
16265
16266         /*
16267          * Reset the chip in case a UNDI or EFI driver did not shut it
16268          * down; otherwise the DMA self test will enable WDMAC and we'll
16269          * see (spurious) pending DMA on the PCI bus at that point.
16270          */
16271         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
16272             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
16273                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
16274                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16275         }
16276
16277         err = tg3_test_dma(tp);
16278         if (err) {
16279                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
16280                 goto err_out_apeunmap;
16281         }
16282
16283         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
16284         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
16285         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
16286         for (i = 0; i < tp->irq_max; i++) {
16287                 struct tg3_napi *tnapi = &tp->napi[i];
16288
16289                 tnapi->tp = tp;
16290                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
16291
16292                 tnapi->int_mbox = intmbx;
16293                 if (i <= 4)
16294                         intmbx += 0x8;
16295                 else
16296                         intmbx += 0x4;
16297
16298                 tnapi->consmbox = rcvmbx;
16299                 tnapi->prodmbox = sndmbx;
16300
16301                 if (i)
16302                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
16303                 else
16304                         tnapi->coal_now = HOSTCC_MODE_NOW;
16305
16306                 if (!tg3_flag(tp, SUPPORT_MSIX))
16307                         break;
16308
16309                 /*
16310                  * If we support MSIX, we'll be using RSS.  If we're using
16311                  * RSS, the first vector only handles link interrupts and the
16312                  * remaining vectors handle rx and tx interrupts.  Reuse the
16313                  * mailbox values for the next iteration.  The values we set up
16314                  * above are still useful for the single-vectored mode.
16315                  */
16316                 if (!i)
16317                         continue;
16318
16319                 rcvmbx += 0x8;
16320
16321                 if (sndmbx & 0x4)
16322                         sndmbx -= 0x4;
16323                 else
16324                         sndmbx += 0xc;
16325         }
16326
16327         tg3_init_coal(tp);
16328
16329         pci_set_drvdata(pdev, dev);
16330
16331         if (tg3_flag(tp, 5717_PLUS)) {
16332                 /* Resume a low-power mode */
16333                 tg3_frob_aux_power(tp, false);
16334         }
16335
16336         tg3_timer_init(tp);
16337
16338         err = register_netdev(dev);
16339         if (err) {
16340                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
16341                 goto err_out_apeunmap;
16342         }
16343
16344         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
16345                     tp->board_part_number,
16346                     tp->pci_chip_rev_id,
16347                     tg3_bus_string(tp, str),
16348                     dev->dev_addr);
16349
16350         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
16351                 struct phy_device *phydev;
16352                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
16353                 netdev_info(dev,
16354                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
16355                             phydev->drv->name, dev_name(&phydev->dev));
16356         } else {
16357                 char *ethtype;
16358
16359                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
16360                         ethtype = "10/100Base-TX";
16361                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
16362                         ethtype = "1000Base-SX";
16363                 else
16364                         ethtype = "10/100/1000Base-T";
16365
16366                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
16367                             "(WireSpeed[%d], EEE[%d])\n",
16368                             tg3_phy_string(tp), ethtype,
16369                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
16370                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
16371         }
16372
16373         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
16374                     (dev->features & NETIF_F_RXCSUM) != 0,
16375                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
16376                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
16377                     tg3_flag(tp, ENABLE_ASF) != 0,
16378                     tg3_flag(tp, TSO_CAPABLE) != 0);
16379         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
16380                     tp->dma_rwctrl,
16381                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
16382                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
16383
16384         pci_save_state(pdev);
16385
16386         return 0;
16387
16388 err_out_apeunmap:
16389         if (tp->aperegs) {
16390                 iounmap(tp->aperegs);
16391                 tp->aperegs = NULL;
16392         }
16393
16394 err_out_iounmap:
16395         if (tp->regs) {
16396                 iounmap(tp->regs);
16397                 tp->regs = NULL;
16398         }
16399
16400 err_out_free_dev:
16401         free_netdev(dev);
16402
16403 err_out_power_down:
16404         pci_set_power_state(pdev, PCI_D3hot);
16405
16406 err_out_free_res:
16407         pci_release_regions(pdev);
16408
16409 err_out_disable_pdev:
16410         pci_disable_device(pdev);
16411         pci_set_drvdata(pdev, NULL);
16412         return err;
16413 }
16414
16415 static void tg3_remove_one(struct pci_dev *pdev)
16416 {
16417         struct net_device *dev = pci_get_drvdata(pdev);
16418
16419         if (dev) {
16420                 struct tg3 *tp = netdev_priv(dev);
16421
16422                 release_firmware(tp->fw);
16423
16424                 tg3_reset_task_cancel(tp);
16425
16426                 if (tg3_flag(tp, USE_PHYLIB)) {
16427                         tg3_phy_fini(tp);
16428                         tg3_mdio_fini(tp);
16429                 }
16430
16431                 unregister_netdev(dev);
16432                 if (tp->aperegs) {
16433                         iounmap(tp->aperegs);
16434                         tp->aperegs = NULL;
16435                 }
16436                 if (tp->regs) {
16437                         iounmap(tp->regs);
16438                         tp->regs = NULL;
16439                 }
16440                 free_netdev(dev);
16441                 pci_release_regions(pdev);
16442                 pci_disable_device(pdev);
16443                 pci_set_drvdata(pdev, NULL);
16444         }
16445 }
16446
16447 #ifdef CONFIG_PM_SLEEP
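/*
 * System sleep callbacks.  tg3_suspend() quiesces the driver and halts
 * the chip; if preparing for power-down fails, it restarts the hardware
 * so the interface is left usable rather than half-stopped.
 */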
16448 static int tg3_suspend(struct device *device)
16449 {
16450         struct pci_dev *pdev = to_pci_dev(device);
16451         struct net_device *dev = pci_get_drvdata(pdev);
16452         struct tg3 *tp = netdev_priv(dev);
16453         int err;
16454
16455         if (!netif_running(dev))
16456                 return 0;
16457
16458         tg3_reset_task_cancel(tp);
16459         tg3_phy_stop(tp);
16460         tg3_netif_stop(tp);
16461
16462         tg3_timer_stop(tp);
16463
16464         tg3_full_lock(tp, 1);
16465         tg3_disable_ints(tp);
16466         tg3_full_unlock(tp);
16467
16468         netif_device_detach(dev);
16469
16470         tg3_full_lock(tp, 0);
16471         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
16472         tg3_flag_clear(tp, INIT_COMPLETE);
16473         tg3_full_unlock(tp);
16474
16475         err = tg3_power_down_prepare(tp);
16476         if (err) {
16477                 int err2;
16478
16479                 tg3_full_lock(tp, 0);
16480
16481                 tg3_flag_set(tp, INIT_COMPLETE);
16482                 err2 = tg3_restart_hw(tp, 1);
16483                 if (err2)
16484                         goto out;
16485
16486                 tg3_timer_start(tp);
16487
16488                 netif_device_attach(dev);
16489                 tg3_netif_start(tp);
16490
16491 out:
16492                 tg3_full_unlock(tp);
16493
16494                 if (!err2)
16495                         tg3_phy_start(tp);
16496         }
16497
16498         return err;
16499 }
16500
16501 static int tg3_resume(struct device *device)
16502 {
16503         struct pci_dev *pdev = to_pci_dev(device);
16504         struct net_device *dev = pci_get_drvdata(pdev);
16505         struct tg3 *tp = netdev_priv(dev);
16506         int err;
16507
16508         if (!netif_running(dev))
16509                 return 0;
16510
16511         netif_device_attach(dev);
16512
16513         tg3_full_lock(tp, 0);
16514
16515         tg3_flag_set(tp, INIT_COMPLETE);
16516         err = tg3_restart_hw(tp, 1);
16517         if (err)
16518                 goto out;
16519
16520         tg3_timer_start(tp);
16521
16522         tg3_netif_start(tp);
16523
16524 out:
16525         tg3_full_unlock(tp);
16526
16527         if (!err)
16528                 tg3_phy_start(tp);
16529
16530         return err;
16531 }
16532
16533 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
16534 #define TG3_PM_OPS (&tg3_pm_ops)
16535
16536 #else
16537
16538 #define TG3_PM_OPS NULL
16539
16540 #endif /* CONFIG_PM_SLEEP */
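/*
 * SIMPLE_DEV_PM_OPS() builds a struct dev_pm_ops that routes every
 * system-sleep callback to the two handlers above; roughly equivalent
 * to this sketch, assuming the macro's definition in this kernel series:
 *
 *	static const struct dev_pm_ops tg3_pm_ops = {
 *		.suspend  = tg3_suspend,  .resume  = tg3_resume,
 *		.freeze   = tg3_suspend,  .thaw    = tg3_resume,
 *		.poweroff = tg3_suspend,  .restore = tg3_resume,
 *	};
 */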
16541
16542 /**
16543  * tg3_io_error_detected - called when a PCI error is detected
16544  * @pdev: Pointer to PCI device
16545  * @state: The current PCI connection state
16546  *
16547  * This function is called after a PCI bus error affecting
16548  * this device has been detected.
16549  */
16550 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
16551                                               pci_channel_state_t state)
16552 {
16553         struct net_device *netdev = pci_get_drvdata(pdev);
16554         struct tg3 *tp = netdev_priv(netdev);
16555         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
16556
16557         netdev_info(netdev, "PCI I/O error detected\n");
16558
16559         rtnl_lock();
16560
16561         if (!netif_running(netdev))
16562                 goto done;
16563
16564         tg3_phy_stop(tp);
16565
16566         tg3_netif_stop(tp);
16567
16568         tg3_timer_stop(tp);
16569
16570         /* Make sure that the reset task doesn't run during recovery */
16571         tg3_reset_task_cancel(tp);
16572
16573         netif_device_detach(netdev);
16574
16575         /* Clean up software state, even if MMIO is blocked */
16576         tg3_full_lock(tp, 0);
16577         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
16578         tg3_full_unlock(tp);
16579
16580 done:
16581         if (state == pci_channel_io_perm_failure)
16582                 err = PCI_ERS_RESULT_DISCONNECT;
16583         else
16584                 pci_disable_device(pdev);
16585
16586         rtnl_unlock();
16587
16588         return err;
16589 }
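/*
 * The value returned to the PCI error-recovery core drives the next step:
 * PCI_ERS_RESULT_NEED_RESET asks the core to reset the slot and then call
 * tg3_io_slot_reset(), while PCI_ERS_RESULT_DISCONNECT (permanent failure)
 * ends recovery with the device considered lost.
 */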
16590
16591 /**
16592  * tg3_io_slot_reset - called after the PCI bus has been reset.
16593  * @pdev: Pointer to PCI device
16594  *
16595  * Restart the card from scratch, as if from a cold boot.
16596  * At this point, the card has experienced a hard reset,
16597  * followed by BIOS fixups, and has its config space
16598  * set up identically to what it was at cold boot.
16599  */
16600 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16601 {
16602         struct net_device *netdev = pci_get_drvdata(pdev);
16603         struct tg3 *tp = netdev_priv(netdev);
16604         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16605         int err;
16606
16607         rtnl_lock();
16608
16609         if (pci_enable_device(pdev)) {
16610                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16611                 goto done;
16612         }
16613
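        /*
         * Reprogram config space from the copy saved in tg3_init_one().
         * pci_restore_state() clears the device's state-saved flag, so
         * save the state again immediately for any subsequent reset.
         */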
16614         pci_set_master(pdev);
16615         pci_restore_state(pdev);
16616         pci_save_state(pdev);
16617
16618         if (!netif_running(netdev)) {
16619                 rc = PCI_ERS_RESULT_RECOVERED;
16620                 goto done;
16621         }
16622
16623         err = tg3_power_up(tp);
16624         if (err)
16625                 goto done;
16626
16627         rc = PCI_ERS_RESULT_RECOVERED;
16628
16629 done:
16630         rtnl_unlock();
16631
16632         return rc;
16633 }
16634
16635 /**
16636  * tg3_io_resume - called when traffic can start flowing again.
16637  * @pdev: Pointer to PCI device
16638  *
16639  * This callback is called when the error recovery driver tells
16640  * us that it is OK to resume normal operation.
16641  */
16642 static void tg3_io_resume(struct pci_dev *pdev)
16643 {
16644         struct net_device *netdev = pci_get_drvdata(pdev);
16645         struct tg3 *tp = netdev_priv(netdev);
16646         int err;
16647
16648         rtnl_lock();
16649
16650         if (!netif_running(netdev))
16651                 goto done;
16652
16653         tg3_full_lock(tp, 0);
16654         tg3_flag_set(tp, INIT_COMPLETE);
16655         err = tg3_restart_hw(tp, 1);
16656         if (err) {
16657                 tg3_full_unlock(tp);
16658                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16659                 goto done;
16660         }
16661
16662         netif_device_attach(netdev);
16663
16664         tg3_timer_start(tp);
16665
16666         tg3_netif_start(tp);
16667
16668         tg3_full_unlock(tp);
16669
16670         tg3_phy_start(tp);
16671
16672 done:
16673         rtnl_unlock();
16674 }
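/*
 * Note that tg3_phy_start() runs after tg3_full_unlock(): it reaches into
 * the PHY layer, which may sleep, so it must not be called while holding
 * the driver's spinlock.
 */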
16675
16676 static const struct pci_error_handlers tg3_err_handler = {
16677         .error_detected = tg3_io_error_detected,
16678         .slot_reset     = tg3_io_slot_reset,
16679         .resume         = tg3_io_resume
16680 };
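/*
 * The recovery path above can be exercised without real hardware faults
 * via the kernel's AER error-injection support (CONFIG_PCIEAER_INJECT plus
 * the aer-inject userspace tool), assuming the platform exposes AER.
 */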
16681
16682 static struct pci_driver tg3_driver = {
16683         .name           = DRV_MODULE_NAME,
16684         .id_table       = tg3_pci_tbl,
16685         .probe          = tg3_init_one,
16686         .remove         = tg3_remove_one,
16687         .err_handler    = &tg3_err_handler,
16688         .driver.pm      = TG3_PM_OPS,
16689 };
16690
16691 static int __init tg3_init(void)
16692 {
16693         return pci_register_driver(&tg3_driver);
16694 }
16695
16696 static void __exit tg3_cleanup(void)
16697 {
16698         pci_unregister_driver(&tg3_driver);
16699 }
16700
16701 module_init(tg3_init);
16702 module_exit(tg3_cleanup);
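/*
 * The init/exit pair above does nothing beyond registering and
 * unregistering the PCI driver; kernels of this vintage can express the
 * same boilerplate with a single helper:
 *
 *	module_pci_driver(tg3_driver);
 */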